Lines matching full:wc (identifier cross-reference for 'wc' in drivers/md/dm-writecache.c)
98 #define WC_MODE_PMEM(wc) ((wc)->pmem_mode) argument
99 #define WC_MODE_FUA(wc) ((wc)->writeback_fua) argument
101 #define WC_MODE_PMEM(wc) false argument
102 #define WC_MODE_FUA(wc) false argument
104 #define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc)) argument
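Lines 98-99 and 101-102 define the same two macros twice, which only parses under a preprocessor conditional that the listing elides. A sketch of the plausible surrounding block; the DM_WRITECACHE_HAS_PMEM guard is an assumption based on the driver's pmem config symbol:

    #ifdef DM_WRITECACHE_HAS_PMEM	/* assumed guard; the #ifdef lines are not in the listing */
    #define WC_MODE_PMEM(wc)		((wc)->pmem_mode)
    #define WC_MODE_FUA(wc)		((wc)->writeback_fua)
    #else
    #define WC_MODE_PMEM(wc)		false
    #define WC_MODE_FUA(wc)		false
    #endif
    /* sorting the freelist only pays off on the SSD path */
    #define WC_MODE_SORT_FREELIST(wc)	(!WC_MODE_PMEM(wc))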
190 struct dm_writecache *wc; member
201 struct dm_writecache *wc; member
210 static void wc_lock(struct dm_writecache *wc) in wc_lock() argument
212 mutex_lock(&wc->lock); in wc_lock()
215 static void wc_unlock(struct dm_writecache *wc) in wc_unlock() argument
217 mutex_unlock(&wc->lock); in wc_unlock()
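The two lock helpers assemble directly from the fragments at lines 210-217; they are thin wrappers around the single wc->lock mutex that serializes nearly all of the target's state:

    static void wc_lock(struct dm_writecache *wc)
    {
            mutex_lock(&wc->lock);
    }

    static void wc_unlock(struct dm_writecache *wc)
    {
            mutex_unlock(&wc->lock);
    }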
221 static int persistent_memory_claim(struct dm_writecache *wc) in persistent_memory_claim() argument
231 wc->memory_vmapped = false; in persistent_memory_claim()
233 if (!wc->ssd_dev->dax_dev) { in persistent_memory_claim()
237 s = wc->memory_map_size; in persistent_memory_claim()
248 offset = get_start_sect(wc->ssd_dev->bdev); in persistent_memory_claim()
257 da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); in persistent_memory_claim()
259 wc->memory_map = NULL; in persistent_memory_claim()
264 wc->memory_map = NULL; in persistent_memory_claim()
270 wc->memory_map = NULL; in persistent_memory_claim()
279 daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, in persistent_memory_claim()
296 wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); in persistent_memory_claim()
297 if (!wc->memory_map) { in persistent_memory_claim()
302 wc->memory_vmapped = true; in persistent_memory_claim()
307 wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT; in persistent_memory_claim()
308 wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT; in persistent_memory_claim()
319 static int persistent_memory_claim(struct dm_writecache *wc) in persistent_memory_claim() argument
325 static void persistent_memory_release(struct dm_writecache *wc) in persistent_memory_release() argument
327 if (wc->memory_vmapped) in persistent_memory_release()
328 vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT)); in persistent_memory_release()
356 static struct wc_memory_superblock *sb(struct dm_writecache *wc) in sb() argument
358 return wc->memory_map; in sb()
361 static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e) in memory_entry() argument
364 return &sb(wc)->entries[e - wc->entries]; in memory_entry()
366 return &sb(wc)->entries[e->index]; in memory_entry()
369 static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) in memory_data() argument
371 return (char *)wc->block_start + (e->index << wc->block_size_bits); in memory_data()
374 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) in cache_sector() argument
376 return wc->start_sector + wc->metadata_sectors + in cache_sector()
377 ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); in cache_sector()
380 static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e) in read_original_sector() argument
385 return le64_to_cpu(memory_entry(wc, e)->original_sector); in read_original_sector()
389 static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e) in read_seq_count() argument
394 return le64_to_cpu(memory_entry(wc, e)->seq_count); in read_seq_count()
398 static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e) in clear_seq_count() argument
403 pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1)); in clear_seq_count()
406 static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e, in write_original_sector_seq_count() argument
416 pmem_assign(*memory_entry(wc, e), me); in write_original_sector_seq_count()
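The metadata accessors at lines 380-416 read little-endian fields from the on-media entry array and write them back through pmem_assign(). A sketch assembled from the visible fragments; the elided lines (381-384, 390-393, and so on) plausibly hold hardware-error-handling variants, and the local-struct assembly in write_original_sector_seq_count() is an assumption consistent with the pmem_assign(*memory_entry(wc, e), me) fragment:

    static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
    {
            return le64_to_cpu(memory_entry(wc, e)->original_sector);
    }

    static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
    {
            return le64_to_cpu(memory_entry(wc, e)->seq_count);
    }

    static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
    {
            /* seq_count of -1 marks the entry as free */
            pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
    }

    static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
                                                uint64_t original_sector, uint64_t seq_count)
    {
            struct wc_memory_entry me;      /* assumed local; only the final assign is listed */

            me.original_sector = cpu_to_le64(original_sector);
            me.seq_count = cpu_to_le64(seq_count);
            pmem_assign(*memory_entry(wc, e), me);
    }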
419 #define writecache_error(wc, err, msg, arg...) \ argument
421 if (!cmpxchg(&(wc)->error, 0, err)) \
423 wake_up(&(wc)->freelist_wait); \
426 #define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error))) argument
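The error macro records only the first error (the cmpxchg succeeds exactly once) and wakes any writer parked on the freelist. Line 422 between the listed fragments is elided; a DMERR() report is assumed there:

    #define writecache_error(wc, err, msg, arg...)                          \
    do {                                                                    \
            if (!cmpxchg(&(wc)->error, 0, err))                             \
                    DMERR(msg, ##arg);  /* assumed: log only the first failure */ \
            wake_up(&(wc)->freelist_wait);                                  \
    } while (0)

    #define writecache_has_error(wc)    (unlikely(READ_ONCE((wc)->error)))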
428 static void writecache_flush_all_metadata(struct dm_writecache *wc) in writecache_flush_all_metadata() argument
430 if (!WC_MODE_PMEM(wc)) in writecache_flush_all_metadata()
431 memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size); in writecache_flush_all_metadata()
434 static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size) in writecache_flush_region() argument
436 if (!WC_MODE_PMEM(wc)) in writecache_flush_region()
437 __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY, in writecache_flush_region()
438 wc->dirty_bitmap); in writecache_flush_region()
441 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);
444 struct dm_writecache *wc; member
454 writecache_error(endio->wc, -EIO, "error writing metadata"); in writecache_notify_io()
460 static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) in writecache_wait_for_ios() argument
462 wait_event(wc->bio_in_progress_wait[direction], in writecache_wait_for_ios()
463 !atomic_read(&wc->bio_in_progress[direction])); in writecache_wait_for_ios()
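writecache_wait_for_ios() is fully recoverable from the two fragments: it sleeps until the per-direction in-flight bio counter drains to zero. The matching atomic_dec_and_test()/wake_up() pair appears in writecache_end_io(), further down the listing:

    static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
    {
            wait_event(wc->bio_in_progress_wait[direction],
                       !atomic_read(&wc->bio_in_progress[direction]));
    }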
466 static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) in ssd_commit_flushed() argument
471 wc, in ssd_commit_flushed()
475 unsigned bitmap_bits = wc->dirty_bitmap_size * 8; in ssd_commit_flushed()
480 i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i); in ssd_commit_flushed()
483 j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i); in ssd_commit_flushed()
485 region.bdev = wc->ssd_dev->bdev; in ssd_commit_flushed()
489 if (unlikely(region.sector >= wc->metadata_sectors)) in ssd_commit_flushed()
491 if (unlikely(region.sector + region.count > wc->metadata_sectors)) in ssd_commit_flushed()
492 region.count = wc->metadata_sectors - region.sector; in ssd_commit_flushed()
494 region.sector += wc->start_sector; in ssd_commit_flushed()
499 req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY; in ssd_commit_flushed()
500 req.client = wc->dm_io; in ssd_commit_flushed()
513 writecache_wait_for_ios(wc, WRITE); in ssd_commit_flushed()
515 writecache_disk_flush(wc, wc->ssd_dev); in ssd_commit_flushed()
517 memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size); in ssd_commit_flushed()
520 static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) in writecache_commit_flushed() argument
522 if (WC_MODE_PMEM(wc)) in writecache_commit_flushed()
525 ssd_commit_flushed(wc, wait_for_ios); in writecache_commit_flushed()
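writecache_commit_flushed() splits by mode: the SSD path does the heavy bitmap-driven metadata writeback shown above, while the pmem branch body (lines 523-524) is elided from the listing and a plain store barrier is assumed there:

    static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
    {
            if (WC_MODE_PMEM(wc))
                    wmb();  /* assumption: pmem stores only need ordering here */
            else
                    ssd_commit_flushed(wc, wait_for_ios);
    }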
528 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) in writecache_disk_flush() argument
541 req.client = wc->dm_io; in writecache_disk_flush()
546 writecache_error(wc, r, "error flushing metadata: %d", r); in writecache_disk_flush()
552 static struct wc_entry *writecache_find_entry(struct dm_writecache *wc, in writecache_find_entry() argument
556 struct rb_node *node = wc->tree.rb_node; in writecache_find_entry()
563 if (read_original_sector(wc, e) == block) in writecache_find_entry()
565 node = (read_original_sector(wc, e) >= block ? in writecache_find_entry()
571 if (read_original_sector(wc, e) >= block) { in writecache_find_entry()
593 if (read_original_sector(wc, e2) != block) in writecache_find_entry()
599 static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins) in writecache_insert_entry() argument
602 struct rb_node **node = &wc->tree.rb_node, *parent = NULL; in writecache_insert_entry()
607 if (read_original_sector(wc, e) > read_original_sector(wc, ins)) in writecache_insert_entry()
613 rb_insert_color(&ins->rb_node, &wc->tree); in writecache_insert_entry()
614 list_add(&ins->lru, &wc->lru); in writecache_insert_entry()
617 static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e) in writecache_unlink() argument
620 rb_erase(&e->rb_node, &wc->tree); in writecache_unlink()
623 static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e) in writecache_add_to_freelist() argument
625 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_add_to_freelist()
626 struct rb_node **node = &wc->freetree.rb_node, *parent = NULL; in writecache_add_to_freelist()
628 wc->current_free = e; in writecache_add_to_freelist()
637 rb_insert_color(&e->rb_node, &wc->freetree); in writecache_add_to_freelist()
639 list_add_tail(&e->lru, &wc->freelist); in writecache_add_to_freelist()
641 wc->freelist_size++; in writecache_add_to_freelist()
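The freelist insert keeps SSD-mode entries sorted so that writes to the cache device stay roughly sequential. The rbtree descent (lines 629-636) is elided; the sketch below assumes the usual sort-by-node-address walk, which orders entries by cache block index since wc->entries is one flat array:

    static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
    {
            if (WC_MODE_SORT_FREELIST(wc)) {
                    struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;

                    if (unlikely(!*node))
                            wc->current_free = e;
                    while (*node) {         /* assumed descent; elided in the listing */
                            parent = *node;
                            if (&e->rb_node < *node)
                                    node = &parent->rb_left;
                            else
                                    node = &parent->rb_right;
                    }
                    rb_link_node(&e->rb_node, parent, node);
                    rb_insert_color(&e->rb_node, &wc->freetree);
            } else {
                    list_add_tail(&e->lru, &wc->freelist);
            }
            wc->freelist_size++;
    }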
644 static inline void writecache_verify_watermark(struct dm_writecache *wc) in writecache_verify_watermark() argument
646 if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) in writecache_verify_watermark()
647 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_verify_watermark()
650 static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc) in writecache_pop_from_freelist() argument
654 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_pop_from_freelist()
656 if (unlikely(!wc->current_free)) in writecache_pop_from_freelist()
658 e = wc->current_free; in writecache_pop_from_freelist()
660 rb_erase(&e->rb_node, &wc->freetree); in writecache_pop_from_freelist()
662 next = rb_first(&wc->freetree); in writecache_pop_from_freelist()
663 wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL; in writecache_pop_from_freelist()
665 if (unlikely(list_empty(&wc->freelist))) in writecache_pop_from_freelist()
667 e = container_of(wc->freelist.next, struct wc_entry, lru); in writecache_pop_from_freelist()
670 wc->freelist_size--; in writecache_pop_from_freelist()
672 writecache_verify_watermark(wc); in writecache_pop_from_freelist()
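The pop side mirrors the insert: take the cached lowest entry in sorted mode, or the list head otherwise, then re-check the watermark. Assembled from the fragments; the rb_next() advance, list_del(), and return statements are assumptions filling lines the listing skips:

    static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
    {
            struct wc_entry *e;

            if (WC_MODE_SORT_FREELIST(wc)) {
                    struct rb_node *next;

                    if (unlikely(!wc->current_free))
                            return NULL;
                    e = wc->current_free;
                    next = rb_next(&e->rb_node);    /* assumed; elided in the listing */
                    rb_erase(&e->rb_node, &wc->freetree);
                    if (unlikely(!next))
                            next = rb_first(&wc->freetree);
                    wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
            } else {
                    if (unlikely(list_empty(&wc->freelist)))
                            return NULL;
                    e = container_of(wc->freelist.next, struct wc_entry, lru);
                    list_del(&e->lru);              /* assumed; elided in the listing */
            }
            wc->freelist_size--;

            writecache_verify_watermark(wc);

            return e;
    }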
677 static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_free_entry() argument
679 writecache_unlink(wc, e); in writecache_free_entry()
680 writecache_add_to_freelist(wc, e); in writecache_free_entry()
681 clear_seq_count(wc, e); in writecache_free_entry()
682 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_free_entry()
683 if (unlikely(waitqueue_active(&wc->freelist_wait))) in writecache_free_entry()
684 wake_up(&wc->freelist_wait); in writecache_free_entry()
687 static void writecache_wait_on_freelist(struct dm_writecache *wc) in writecache_wait_on_freelist() argument
691 prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE); in writecache_wait_on_freelist()
692 wc_unlock(wc); in writecache_wait_on_freelist()
694 finish_wait(&wc->freelist_wait, &wait); in writecache_wait_on_freelist()
695 wc_lock(wc); in writecache_wait_on_freelist()
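writecache_wait_on_freelist() is the classic prepare-to-wait pattern: register on the waitqueue first, drop wc->lock, sleep, then retake the lock. The DEFINE_WAIT and the schedule call are elided from the listing and assumed below:

    static void writecache_wait_on_freelist(struct dm_writecache *wc)
    {
            DEFINE_WAIT(wait);      /* assumed; elided in the listing */

            prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
            wc_unlock(wc);
            io_schedule();          /* assumed; elided in the listing */
            finish_wait(&wc->freelist_wait, &wait);
            wc_lock(wc);
    }

Registering on the waitqueue before dropping the lock closes the race against writecache_free_entry()'s wake_up().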
698 static void writecache_poison_lists(struct dm_writecache *wc) in writecache_poison_lists() argument
703 memset(&wc->tree, -1, sizeof wc->tree); in writecache_poison_lists()
704 wc->lru.next = LIST_POISON1; in writecache_poison_lists()
705 wc->lru.prev = LIST_POISON2; in writecache_poison_lists()
706 wc->freelist.next = LIST_POISON1; in writecache_poison_lists()
707 wc->freelist.prev = LIST_POISON2; in writecache_poison_lists()
710 static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_flush_entry() argument
712 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_flush_entry()
713 if (WC_MODE_PMEM(wc)) in writecache_flush_entry()
714 writecache_flush_region(wc, memory_data(wc, e), wc->block_size); in writecache_flush_entry()
717 static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e) in writecache_entry_is_committed() argument
719 return read_seq_count(wc, e) < wc->seq_count; in writecache_entry_is_committed()
722 static void writecache_flush(struct dm_writecache *wc) in writecache_flush() argument
727 wc->uncommitted_blocks = 0; in writecache_flush()
728 del_timer(&wc->autocommit_timer); in writecache_flush()
730 if (list_empty(&wc->lru)) in writecache_flush()
733 e = container_of(wc->lru.next, struct wc_entry, lru); in writecache_flush()
734 if (writecache_entry_is_committed(wc, e)) { in writecache_flush()
735 if (wc->overwrote_committed) { in writecache_flush()
736 writecache_wait_for_ios(wc, WRITE); in writecache_flush()
737 writecache_disk_flush(wc, wc->ssd_dev); in writecache_flush()
738 wc->overwrote_committed = false; in writecache_flush()
743 writecache_flush_entry(wc, e); in writecache_flush()
744 if (unlikely(e->lru.next == &wc->lru)) in writecache_flush()
747 if (writecache_entry_is_committed(wc, e2)) in writecache_flush()
752 writecache_commit_flushed(wc, true); in writecache_flush()
754 wc->seq_count++; in writecache_flush()
755 pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count)); in writecache_flush()
756 writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count); in writecache_flush()
757 writecache_commit_flushed(wc, false); in writecache_flush()
759 wc->overwrote_committed = false; in writecache_flush()
768 if (read_original_sector(wc, e2) == read_original_sector(wc, e) && in writecache_flush()
770 writecache_free_entry(wc, e2); in writecache_flush()
774 if (unlikely(e->lru.prev == &wc->lru)) in writecache_flush()
781 writecache_commit_flushed(wc, false); in writecache_flush()
786 struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work); in writecache_flush_work() local
788 wc_lock(wc); in writecache_flush_work()
789 writecache_flush(wc); in writecache_flush_work()
790 wc_unlock(wc); in writecache_flush_work()
795 struct dm_writecache *wc = from_timer(wc, t, autocommit_timer); in writecache_autocommit_timer() local
796 if (!writecache_has_error(wc)) in writecache_autocommit_timer()
797 queue_work(wc->writeback_wq, &wc->flush_work); in writecache_autocommit_timer()
800 static void writecache_schedule_autocommit(struct dm_writecache *wc) in writecache_schedule_autocommit() argument
802 if (!timer_pending(&wc->autocommit_timer)) in writecache_schedule_autocommit()
803 mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies); in writecache_schedule_autocommit()
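The autocommit pair is fully recoverable from lines 795-803: the timer callback runs in atomic context, so it only queues flush_work instead of flushing directly, and rescheduling is idempotent thanks to the timer_pending() check:

    static void writecache_autocommit_timer(struct timer_list *t)
    {
            struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);

            if (!writecache_has_error(wc))
                    queue_work(wc->writeback_wq, &wc->flush_work);
    }

    static void writecache_schedule_autocommit(struct dm_writecache *wc)
    {
            if (!timer_pending(&wc->autocommit_timer))
                    mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
    }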
806 static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end) in writecache_discard() argument
811 e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ); in writecache_discard()
815 while (read_original_sector(wc, e) < end) { in writecache_discard()
820 writecache_wait_for_ios(wc, READ); in writecache_discard()
821 writecache_wait_for_ios(wc, WRITE); in writecache_discard()
824 if (!writecache_entry_is_committed(wc, e)) in writecache_discard()
825 wc->uncommitted_blocks--; in writecache_discard()
826 writecache_free_entry(wc, e); in writecache_discard()
836 writecache_commit_flushed(wc, false); in writecache_discard()
839 static bool writecache_wait_for_writeback(struct dm_writecache *wc) in writecache_wait_for_writeback() argument
841 if (wc->writeback_size) { in writecache_wait_for_writeback()
842 writecache_wait_on_freelist(wc); in writecache_wait_for_writeback()
850 struct dm_writecache *wc = ti->private; in writecache_suspend() local
853 del_timer_sync(&wc->autocommit_timer); in writecache_suspend()
855 wc_lock(wc); in writecache_suspend()
856 writecache_flush(wc); in writecache_suspend()
857 flush_on_suspend = wc->flush_on_suspend; in writecache_suspend()
859 wc->flush_on_suspend = false; in writecache_suspend()
860 wc->writeback_all++; in writecache_suspend()
861 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_suspend()
863 wc_unlock(wc); in writecache_suspend()
865 drain_workqueue(wc->writeback_wq); in writecache_suspend()
867 wc_lock(wc); in writecache_suspend()
869 wc->writeback_all--; in writecache_suspend()
870 while (writecache_wait_for_writeback(wc)); in writecache_suspend()
872 if (WC_MODE_PMEM(wc)) in writecache_suspend()
873 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); in writecache_suspend()
875 writecache_poison_lists(wc); in writecache_suspend()
877 wc_unlock(wc); in writecache_suspend()
880 static int writecache_alloc_entries(struct dm_writecache *wc) in writecache_alloc_entries() argument
884 if (wc->entries) in writecache_alloc_entries()
886 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks)); in writecache_alloc_entries()
887 if (!wc->entries) in writecache_alloc_entries()
889 for (b = 0; b < wc->n_blocks; b++) { in writecache_alloc_entries()
890 struct wc_entry *e = &wc->entries[b]; in writecache_alloc_entries()
899 static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) in writecache_read_metadata() argument
904 region.bdev = wc->ssd_dev->bdev; in writecache_read_metadata()
905 region.sector = wc->start_sector; in writecache_read_metadata()
910 req.mem.ptr.vma = (char *)wc->memory_map; in writecache_read_metadata()
911 req.client = wc->dm_io; in writecache_read_metadata()
919 struct dm_writecache *wc = ti->private; in writecache_resume() local
925 wc_lock(wc); in writecache_resume()
927 if (WC_MODE_PMEM(wc)) { in writecache_resume()
928 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); in writecache_resume()
930 r = writecache_read_metadata(wc, wc->metadata_sectors); in writecache_resume()
933 writecache_error(wc, r, "unable to read metadata: %d", r); in writecache_resume()
935 memset((char *)wc->memory_map + sb_entries_offset, -1, in writecache_resume()
936 (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); in writecache_resume()
940 wc->tree = RB_ROOT; in writecache_resume()
941 INIT_LIST_HEAD(&wc->lru); in writecache_resume()
942 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_resume()
943 wc->freetree = RB_ROOT; in writecache_resume()
944 wc->current_free = NULL; in writecache_resume()
946 INIT_LIST_HEAD(&wc->freelist); in writecache_resume()
948 wc->freelist_size = 0; in writecache_resume()
950 r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t)); in writecache_resume()
952 writecache_error(wc, r, "hardware memory error when reading superblock: %d", r); in writecache_resume()
955 wc->seq_count = le64_to_cpu(sb_seq_count); in writecache_resume()
958 for (b = 0; b < wc->n_blocks; b++) { in writecache_resume()
959 struct wc_entry *e = &wc->entries[b]; in writecache_resume()
961 if (writecache_has_error(wc)) { in writecache_resume()
966 r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_resume()
968 writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d", in writecache_resume()
979 for (b = 0; b < wc->n_blocks; b++) { in writecache_resume()
980 struct wc_entry *e = &wc->entries[b]; in writecache_resume()
981 if (!writecache_entry_is_committed(wc, e)) { in writecache_resume()
982 if (read_seq_count(wc, e) != -1) { in writecache_resume()
984 clear_seq_count(wc, e); in writecache_resume()
987 writecache_add_to_freelist(wc, e); in writecache_resume()
991 old = writecache_find_entry(wc, read_original_sector(wc, e), 0); in writecache_resume()
993 writecache_insert_entry(wc, e); in writecache_resume()
995 if (read_seq_count(wc, old) == read_seq_count(wc, e)) { in writecache_resume()
996 writecache_error(wc, -EINVAL, in writecache_resume()
998 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e), in writecache_resume()
999 (unsigned long long)read_seq_count(wc, e)); in writecache_resume()
1001 if (read_seq_count(wc, old) > read_seq_count(wc, e)) { in writecache_resume()
1004 writecache_free_entry(wc, old); in writecache_resume()
1005 writecache_insert_entry(wc, e); in writecache_resume()
1014 writecache_flush_all_metadata(wc); in writecache_resume()
1015 writecache_commit_flushed(wc, false); in writecache_resume()
1018 writecache_verify_watermark(wc); in writecache_resume()
1020 wc_unlock(wc); in writecache_resume()
1023 static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_flush_mesg() argument
1028 wc_lock(wc); in process_flush_mesg()
1029 if (dm_suspended(wc->ti)) { in process_flush_mesg()
1030 wc_unlock(wc); in process_flush_mesg()
1033 if (writecache_has_error(wc)) { in process_flush_mesg()
1034 wc_unlock(wc); in process_flush_mesg()
1038 writecache_flush(wc); in process_flush_mesg()
1039 wc->writeback_all++; in process_flush_mesg()
1040 queue_work(wc->writeback_wq, &wc->writeback_work); in process_flush_mesg()
1041 wc_unlock(wc); in process_flush_mesg()
1043 flush_workqueue(wc->writeback_wq); in process_flush_mesg()
1045 wc_lock(wc); in process_flush_mesg()
1046 wc->writeback_all--; in process_flush_mesg()
1047 if (writecache_has_error(wc)) { in process_flush_mesg()
1048 wc_unlock(wc); in process_flush_mesg()
1051 wc_unlock(wc); in process_flush_mesg()
1056 static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_flush_on_suspend_mesg() argument
1061 wc_lock(wc); in process_flush_on_suspend_mesg()
1062 wc->flush_on_suspend = true; in process_flush_on_suspend_mesg()
1063 wc_unlock(wc); in process_flush_on_suspend_mesg()
1072 struct dm_writecache *wc = ti->private; in writecache_message() local
1075 r = process_flush_mesg(argc, argv, wc); in writecache_message()
1077 r = process_flush_on_suspend_mesg(argc, argv, wc); in writecache_message()
1084 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data) in bio_copy_block() argument
1090 unsigned remaining_size = wc->block_size; in bio_copy_block()
1104 writecache_error(wc, r, "hardware memory error when reading data: %d", r); in bio_copy_block()
1122 struct dm_writecache *wc = data; in writecache_flush_thread() local
1127 wc_lock(wc); in writecache_flush_thread()
1128 bio = bio_list_pop(&wc->flush_list); in writecache_flush_thread()
1131 wc_unlock(wc); in writecache_flush_thread()
1143 writecache_discard(wc, bio->bi_iter.bi_sector, in writecache_flush_thread()
1145 wc_unlock(wc); in writecache_flush_thread()
1146 bio_set_dev(bio, wc->dev->bdev); in writecache_flush_thread()
1149 writecache_flush(wc); in writecache_flush_thread()
1150 wc_unlock(wc); in writecache_flush_thread()
1151 if (writecache_has_error(wc)) in writecache_flush_thread()
1160 static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio) in writecache_offload_bio() argument
1162 if (bio_list_empty(&wc->flush_list)) in writecache_offload_bio()
1163 wake_up_process(wc->flush_thread); in writecache_offload_bio()
1164 bio_list_add(&wc->flush_list, bio); in writecache_offload_bio()
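writecache_offload_bio() hands flush and discard bios to the flush thread on the SSD path. It is fully recoverable from lines 1162-1164; note that it runs under wc->lock, as does the bio_list_pop() in writecache_flush_thread(), so the empty-check-then-add sequence is race-free:

    static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
    {
            if (bio_list_empty(&wc->flush_list))
                    wake_up_process(wc->flush_thread);
            bio_list_add(&wc->flush_list, bio);
    }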
1170 struct dm_writecache *wc = ti->private; in writecache_map() local
1174 wc_lock(wc); in writecache_map()
1177 if (writecache_has_error(wc)) in writecache_map()
1179 if (WC_MODE_PMEM(wc)) { in writecache_map()
1180 writecache_flush(wc); in writecache_map()
1181 if (writecache_has_error(wc)) in writecache_map()
1185 writecache_offload_bio(wc, bio); in writecache_map()
1193 (wc->block_size / 512 - 1)) != 0)) { in writecache_map()
1196 bio->bi_iter.bi_size, wc->block_size); in writecache_map()
1201 if (writecache_has_error(wc)) in writecache_map()
1203 if (WC_MODE_PMEM(wc)) { in writecache_map()
1204 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio)); in writecache_map()
1207 writecache_offload_bio(wc, bio); in writecache_map()
1214 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); in writecache_map()
1215 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) { in writecache_map()
1216 if (WC_MODE_PMEM(wc)) { in writecache_map()
1217 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map()
1222 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT); in writecache_map()
1223 bio_set_dev(bio, wc->ssd_dev->bdev); in writecache_map()
1224 bio->bi_iter.bi_sector = cache_sector(wc, e); in writecache_map()
1225 if (!writecache_entry_is_committed(wc, e)) in writecache_map()
1226 writecache_wait_for_ios(wc, WRITE); in writecache_map()
1232 read_original_sector(wc, e) - bio->bi_iter.bi_sector; in writecache_map()
1241 if (writecache_has_error(wc)) in writecache_map()
1243 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0); in writecache_map()
1245 if (!writecache_entry_is_committed(wc, e)) in writecache_map()
1247 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) { in writecache_map()
1248 wc->overwrote_committed = true; in writecache_map()
1252 e = writecache_pop_from_freelist(wc); in writecache_map()
1254 writecache_wait_on_freelist(wc); in writecache_map()
1257 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count); in writecache_map()
1258 writecache_insert_entry(wc, e); in writecache_map()
1259 wc->uncommitted_blocks++; in writecache_map()
1261 if (WC_MODE_PMEM(wc)) { in writecache_map()
1262 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map()
1264 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT); in writecache_map()
1265 bio_set_dev(bio, wc->ssd_dev->bdev); in writecache_map()
1266 bio->bi_iter.bi_sector = cache_sector(wc, e); in writecache_map()
1267 if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) { in writecache_map()
1268 wc->uncommitted_blocks = 0; in writecache_map()
1269 queue_work(wc->writeback_wq, &wc->flush_work); in writecache_map()
1271 writecache_schedule_autocommit(wc); in writecache_map()
1278 wc->uncommitted_blocks >= wc->autocommit_blocks)) in writecache_map()
1279 writecache_flush(wc); in writecache_map()
1281 writecache_schedule_autocommit(wc); in writecache_map()
1286 bio_set_dev(bio, wc->dev->bdev); in writecache_map()
1287 wc_unlock(wc); in writecache_map()
1293 atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]); in writecache_map()
1294 wc_unlock(wc); in writecache_map()
1298 wc_unlock(wc); in writecache_map()
1303 wc_unlock(wc); in writecache_map()
1307 wc_unlock(wc); in writecache_map()
1314 struct dm_writecache *wc = ti->private; in writecache_end_io() local
1318 if (atomic_dec_and_test(&wc->bio_in_progress[dir])) in writecache_end_io()
1319 if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir]))) in writecache_end_io()
1320 wake_up(&wc->bio_in_progress_wait[dir]); in writecache_end_io()
1328 struct dm_writecache *wc = ti->private; in writecache_iterate_devices() local
1330 return fn(ti, wc->dev, 0, ti->len, data); in writecache_iterate_devices()
1335 struct dm_writecache *wc = ti->private; in writecache_io_hints() local
1337 if (limits->logical_block_size < wc->block_size) in writecache_io_hints()
1338 limits->logical_block_size = wc->block_size; in writecache_io_hints()
1340 if (limits->physical_block_size < wc->block_size) in writecache_io_hints()
1341 limits->physical_block_size = wc->block_size; in writecache_io_hints()
1343 if (limits->io_min < wc->block_size) in writecache_io_hints()
1344 limits->io_min = wc->block_size; in writecache_io_hints()
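writecache_io_hints() is fully recoverable from lines 1335-1344; it clamps the exposed queue limits up to the cache block size so upper layers never submit bios smaller than one cache block:

    static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
    {
            struct dm_writecache *wc = ti->private;

            if (limits->logical_block_size < wc->block_size)
                    limits->logical_block_size = wc->block_size;

            if (limits->physical_block_size < wc->block_size)
                    limits->physical_block_size = wc->block_size;

            if (limits->io_min < wc->block_size)
                    limits->io_min = wc->block_size;
    }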
1351 struct dm_writecache *wc = wb->wc; in writecache_writeback_endio() local
1354 raw_spin_lock_irqsave(&wc->endio_list_lock, flags); in writecache_writeback_endio()
1355 if (unlikely(list_empty(&wc->endio_list))) in writecache_writeback_endio()
1356 wake_up_process(wc->endio_thread); in writecache_writeback_endio()
1357 list_add_tail(&wb->endio_entry, &wc->endio_list); in writecache_writeback_endio()
1358 raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags); in writecache_writeback_endio()
1364 struct dm_writecache *wc = c->wc; in writecache_copy_endio() local
1368 raw_spin_lock_irq(&wc->endio_list_lock); in writecache_copy_endio()
1369 if (unlikely(list_empty(&wc->endio_list))) in writecache_copy_endio()
1370 wake_up_process(wc->endio_thread); in writecache_copy_endio()
1371 list_add_tail(&c->endio_entry, &wc->endio_list); in writecache_copy_endio()
1372 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_copy_endio()
1375 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list) in __writecache_endio_pmem() argument
1387 writecache_error(wc, blk_status_to_errno(wb->bio.bi_status), in __writecache_endio_pmem()
1395 if (!writecache_has_error(wc)) in __writecache_endio_pmem()
1396 writecache_free_entry(wc, e); in __writecache_endio_pmem()
1397 BUG_ON(!wc->writeback_size); in __writecache_endio_pmem()
1398 wc->writeback_size--; in __writecache_endio_pmem()
1401 writecache_commit_flushed(wc, false); in __writecache_endio_pmem()
1402 wc_unlock(wc); in __writecache_endio_pmem()
1403 wc_lock(wc); in __writecache_endio_pmem()
1414 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list) in __writecache_endio_ssd() argument
1424 writecache_error(wc, c->error, "copy error"); in __writecache_endio_ssd()
1431 if (!writecache_has_error(wc)) in __writecache_endio_ssd()
1432 writecache_free_entry(wc, e); in __writecache_endio_ssd()
1434 BUG_ON(!wc->writeback_size); in __writecache_endio_ssd()
1435 wc->writeback_size--; in __writecache_endio_ssd()
1438 mempool_free(c, &wc->copy_pool); in __writecache_endio_ssd()
1444 struct dm_writecache *wc = data; in writecache_endio_thread() local
1449 raw_spin_lock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1450 if (!list_empty(&wc->endio_list)) in writecache_endio_thread()
1453 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1465 list = wc->endio_list; in writecache_endio_thread()
1467 INIT_LIST_HEAD(&wc->endio_list); in writecache_endio_thread()
1468 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1470 if (!WC_MODE_FUA(wc)) in writecache_endio_thread()
1471 writecache_disk_flush(wc, wc->dev); in writecache_endio_thread()
1473 wc_lock(wc); in writecache_endio_thread()
1475 if (WC_MODE_PMEM(wc)) { in writecache_endio_thread()
1476 __writecache_endio_pmem(wc, &list); in writecache_endio_thread()
1478 __writecache_endio_ssd(wc, &list); in writecache_endio_thread()
1479 writecache_wait_for_ios(wc, READ); in writecache_endio_thread()
1482 writecache_commit_flushed(wc, false); in writecache_endio_thread()
1484 wc_unlock(wc); in writecache_endio_thread()
1492 struct dm_writecache *wc = wb->wc; in wc_add_block() local
1493 unsigned block_size = wc->block_size; in wc_add_block()
1494 void *address = memory_data(wc, e); in wc_add_block()
1506 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl) in __writeback_throttle() argument
1508 if (unlikely(wc->max_writeback_jobs)) { in __writeback_throttle()
1509 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) { in __writeback_throttle()
1510 wc_lock(wc); in __writeback_throttle()
1511 while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs) in __writeback_throttle()
1512 writecache_wait_on_freelist(wc); in __writeback_throttle()
1513 wc_unlock(wc); in __writeback_throttle()
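__writeback_throttle() is fully recoverable from lines 1508-1513 and shows the lockless-check-then-locked-recheck idiom: READ_ONCE() peeks at writeback_size without wc->lock, and only if the job limit looks exceeded does it take the lock and wait for completions to drain:

    static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
    {
            if (unlikely(wc->max_writeback_jobs)) {
                    if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
                            wc_lock(wc);
                            while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
                                    writecache_wait_on_freelist(wc);
                            wc_unlock(wc);
                    }
            }
    }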
1519 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl) in __writecache_writeback_pmem() argument
1533 bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); in __writecache_writeback_pmem()
1535 wb->wc = wc; in __writecache_writeback_pmem()
1537 bio_set_dev(&wb->bio, wc->dev->bdev); in __writecache_writeback_pmem()
1538 wb->bio.bi_iter.bi_sector = read_original_sector(wc, e); in __writecache_writeback_pmem()
1555 if (read_original_sector(wc, f) != in __writecache_writeback_pmem()
1556 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) in __writecache_writeback_pmem()
1565 bio_set_op_attrs(&wb->bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); in __writecache_writeback_pmem()
1566 if (writecache_has_error(wc)) { in __writecache_writeback_pmem()
1573 __writeback_throttle(wc, wbl); in __writecache_writeback_pmem()
1577 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl) in __writecache_writeback_ssd() argument
1590 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT); in __writecache_writeback_ssd()
1592 from.bdev = wc->ssd_dev->bdev; in __writecache_writeback_ssd()
1593 from.sector = cache_sector(wc, e); in __writecache_writeback_ssd()
1595 to.bdev = wc->dev->bdev; in __writecache_writeback_ssd()
1596 to.sector = read_original_sector(wc, e); in __writecache_writeback_ssd()
1599 c = mempool_alloc(&wc->copy_pool, GFP_NOIO); in __writecache_writeback_ssd()
1600 c->wc = wc; in __writecache_writeback_ssd()
1604 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) { in __writecache_writeback_ssd()
1612 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); in __writecache_writeback_ssd()
1614 __writeback_throttle(wc, wbl); in __writecache_writeback_ssd()
1620 struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work); in writecache_writeback() local
1628 wc_lock(wc); in writecache_writeback()
1630 if (writecache_has_error(wc)) { in writecache_writeback()
1631 wc_unlock(wc); in writecache_writeback()
1635 if (unlikely(wc->writeback_all)) { in writecache_writeback()
1636 if (writecache_wait_for_writeback(wc)) in writecache_writeback()
1640 if (wc->overwrote_committed) { in writecache_writeback()
1641 writecache_wait_for_ios(wc, WRITE); in writecache_writeback()
1648 while (!list_empty(&wc->lru) && in writecache_writeback()
1649 (wc->writeback_all || in writecache_writeback()
1650 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) { in writecache_writeback()
1654 likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) { in writecache_writeback()
1655 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_writeback()
1659 e = container_of(wc->lru.prev, struct wc_entry, lru); in writecache_writeback()
1661 if (unlikely(!writecache_entry_is_committed(wc, e))) { in writecache_writeback()
1662 writecache_flush(wc); in writecache_writeback()
1667 if (unlikely(read_original_sector(wc, f) == in writecache_writeback()
1668 read_original_sector(wc, e))) { in writecache_writeback()
1676 wc->writeback_size++; in writecache_writeback()
1690 if (read_original_sector(wc, g) == in writecache_writeback()
1691 read_original_sector(wc, f)) { in writecache_writeback()
1695 if (read_original_sector(wc, g) != in writecache_writeback()
1696 read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT)) in writecache_writeback()
1700 if (unlikely(!writecache_entry_is_committed(wc, g))) in writecache_writeback()
1703 if (!WC_MODE_PMEM(wc)) { in writecache_writeback()
1709 //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all)) in writecache_writeback()
1712 wc->writeback_size++; in writecache_writeback()
1727 list_splice_tail(&skipped, &wc->lru); in writecache_writeback()
1733 writecache_wait_for_writeback(wc); in writecache_writeback()
1736 wc_unlock(wc); in writecache_writeback()
1740 if (WC_MODE_PMEM(wc)) in writecache_writeback()
1741 __writecache_writeback_pmem(wc, &wbl); in writecache_writeback()
1743 __writecache_writeback_ssd(wc, &wbl); in writecache_writeback()
1747 if (unlikely(wc->writeback_all)) { in writecache_writeback()
1748 wc_lock(wc); in writecache_writeback()
1749 while (writecache_wait_for_writeback(wc)); in writecache_writeback()
1750 wc_unlock(wc); in writecache_writeback()
1789 static int init_memory(struct dm_writecache *wc) in init_memory() argument
1794 r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL); in init_memory()
1798 r = writecache_alloc_entries(wc); in init_memory()
1802 for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++) in init_memory()
1803 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0)); in init_memory()
1804 pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION)); in init_memory()
1805 pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size)); in init_memory()
1806 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); in init_memory()
1807 pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); in init_memory()
1809 for (b = 0; b < wc->n_blocks; b++) { in init_memory()
1810 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); in init_memory()
1814 writecache_flush_all_metadata(wc); in init_memory()
1815 writecache_commit_flushed(wc, false); in init_memory()
1816 pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC)); in init_memory()
1817 writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic); in init_memory()
1818 writecache_commit_flushed(wc, false); in init_memory()
1825 struct dm_writecache *wc = ti->private; in writecache_dtr() local
1827 if (!wc) in writecache_dtr()
1830 if (wc->endio_thread) in writecache_dtr()
1831 kthread_stop(wc->endio_thread); in writecache_dtr()
1833 if (wc->flush_thread) in writecache_dtr()
1834 kthread_stop(wc->flush_thread); in writecache_dtr()
1836 bioset_exit(&wc->bio_set); in writecache_dtr()
1838 mempool_exit(&wc->copy_pool); in writecache_dtr()
1840 if (wc->writeback_wq) in writecache_dtr()
1841 destroy_workqueue(wc->writeback_wq); in writecache_dtr()
1843 if (wc->dev) in writecache_dtr()
1844 dm_put_device(ti, wc->dev); in writecache_dtr()
1846 if (wc->ssd_dev) in writecache_dtr()
1847 dm_put_device(ti, wc->ssd_dev); in writecache_dtr()
1849 if (wc->entries) in writecache_dtr()
1850 vfree(wc->entries); in writecache_dtr()
1852 if (wc->memory_map) { in writecache_dtr()
1853 if (WC_MODE_PMEM(wc)) in writecache_dtr()
1854 persistent_memory_release(wc); in writecache_dtr()
1856 vfree(wc->memory_map); in writecache_dtr()
1859 if (wc->dm_kcopyd) in writecache_dtr()
1860 dm_kcopyd_client_destroy(wc->dm_kcopyd); in writecache_dtr()
1862 if (wc->dm_io) in writecache_dtr()
1863 dm_io_client_destroy(wc->dm_io); in writecache_dtr()
1865 if (wc->dirty_bitmap) in writecache_dtr()
1866 vfree(wc->dirty_bitmap); in writecache_dtr()
1868 kfree(wc); in writecache_dtr()
1873 struct dm_writecache *wc; in writecache_ctr() local
1892 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL); in writecache_ctr()
1893 if (!wc) { in writecache_ctr()
1898 ti->private = wc; in writecache_ctr()
1899 wc->ti = ti; in writecache_ctr()
1901 mutex_init(&wc->lock); in writecache_ctr()
1902 writecache_poison_lists(wc); in writecache_ctr()
1903 init_waitqueue_head(&wc->freelist_wait); in writecache_ctr()
1904 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0); in writecache_ctr()
1907 atomic_set(&wc->bio_in_progress[i], 0); in writecache_ctr()
1908 init_waitqueue_head(&wc->bio_in_progress_wait[i]); in writecache_ctr()
1911 wc->dm_io = dm_io_client_create(); in writecache_ctr()
1912 if (IS_ERR(wc->dm_io)) { in writecache_ctr()
1913 r = PTR_ERR(wc->dm_io); in writecache_ctr()
1915 wc->dm_io = NULL; in writecache_ctr()
1919 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1); in writecache_ctr()
1920 if (!wc->writeback_wq) { in writecache_ctr()
1925 INIT_WORK(&wc->writeback_work, writecache_writeback); in writecache_ctr()
1926 INIT_WORK(&wc->flush_work, writecache_flush_work); in writecache_ctr()
1928 raw_spin_lock_init(&wc->endio_list_lock); in writecache_ctr()
1929 INIT_LIST_HEAD(&wc->endio_list); in writecache_ctr()
1930 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio"); in writecache_ctr()
1931 if (IS_ERR(wc->endio_thread)) { in writecache_ctr()
1932 r = PTR_ERR(wc->endio_thread); in writecache_ctr()
1933 wc->endio_thread = NULL; in writecache_ctr()
1937 wake_up_process(wc->endio_thread); in writecache_ctr()
1947 wc->pmem_mode = false; in writecache_ctr()
1950 wc->pmem_mode = true; in writecache_ctr()
1951 wc->writeback_fua = true; in writecache_ctr()
1966 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
1967 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE, in writecache_ctr()
1975 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct)); in writecache_ctr()
1988 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev); in writecache_ctr()
2001 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev); in writecache_ctr()
2006 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); in writecache_ctr()
2014 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 || in writecache_ctr()
2015 wc->block_size < 512 || wc->block_size > PAGE_SIZE || in writecache_ctr()
2016 (wc->block_size & (wc->block_size - 1))) { in writecache_ctr()
2021 if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || in writecache_ctr()
2022 wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { in writecache_ctr()
2027 wc->block_size_bits = __ffs(wc->block_size); in writecache_ctr()
2029 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; in writecache_ctr()
2030 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM; in writecache_ctr()
2031 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC); in writecache_ctr()
2047 wc->start_sector = start_sector; in writecache_ctr()
2048 if (wc->start_sector != start_sector || in writecache_ctr()
2049 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) in writecache_ctr()
2057 wc->high_wm_percent_set = true; in writecache_ctr()
2064 wc->low_wm_percent_set = true; in writecache_ctr()
2067 if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1) in writecache_ctr()
2069 wc->max_writeback_jobs_set = true; in writecache_ctr()
2072 if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1) in writecache_ctr()
2074 wc->autocommit_blocks_set = true; in writecache_ctr()
2082 wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs); in writecache_ctr()
2083 wc->autocommit_time_set = true; in writecache_ctr()
2085 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2086 wc->writeback_fua = true; in writecache_ctr()
2087 wc->writeback_fua_set = true; in writecache_ctr()
2090 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2091 wc->writeback_fua = false; in writecache_ctr()
2092 wc->writeback_fua_set = true; in writecache_ctr()
2108 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2109 r = persistent_memory_claim(wc); in writecache_ctr()
2118 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; in writecache_ctr()
2120 bio_list_init(&wc->flush_list); in writecache_ctr()
2121 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); in writecache_ctr()
2122 if (IS_ERR(wc->flush_thread)) { in writecache_ctr()
2123 r = PTR_ERR(wc->flush_thread); in writecache_ctr()
2124 wc->flush_thread = NULL; in writecache_ctr()
2128 wake_up_process(wc->flush_thread); in writecache_ctr()
2130 r = calculate_memory_size(wc->memory_map_size, wc->block_size, in writecache_ctr()
2137 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) + in writecache_ctr()
2146 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits); in writecache_ctr()
2147 if (!wc->memory_map) { in writecache_ctr()
2153 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle); in writecache_ctr()
2154 if (IS_ERR(wc->dm_kcopyd)) { in writecache_ctr()
2155 r = PTR_ERR(wc->dm_kcopyd); in writecache_ctr()
2157 wc->dm_kcopyd = NULL; in writecache_ctr()
2161 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT); in writecache_ctr()
2162 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) / in writecache_ctr()
2164 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size); in writecache_ctr()
2165 if (!wc->dirty_bitmap) { in writecache_ctr()
2171 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); in writecache_ctr()
2178 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock)); in writecache_ctr()
2184 r = init_memory(wc); in writecache_ctr()
2189 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock)); in writecache_ctr()
2208 if (le32_to_cpu(s.block_size) != wc->block_size) { in writecache_ctr()
2214 wc->n_blocks = le64_to_cpu(s.n_blocks); in writecache_ctr()
2216 offset = wc->n_blocks * sizeof(struct wc_memory_entry); in writecache_ctr()
2217 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) { in writecache_ctr()
2226 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1); in writecache_ctr()
2227 data_size = wc->n_blocks * (size_t)wc->block_size; in writecache_ctr()
2228 if (!offset || (data_size / wc->block_size != wc->n_blocks) || in writecache_ctr()
2231 if (offset + data_size > wc->memory_map_size) { in writecache_ctr()
2237 wc->metadata_sectors = offset >> SECTOR_SHIFT; in writecache_ctr()
2238 wc->block_start = (char *)sb(wc) + offset; in writecache_ctr()
2240 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent); in writecache_ctr()
2243 wc->freelist_high_watermark = x; in writecache_ctr()
2244 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent); in writecache_ctr()
2247 wc->freelist_low_watermark = x; in writecache_ctr()
2249 r = writecache_alloc_entries(wc); in writecache_ctr()
2259 if (WC_MODE_PMEM(wc)) in writecache_ctr()
2260 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); in writecache_ctr()
2275 struct dm_writecache *wc = ti->private; in writecache_status() local
2282 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc), in writecache_status()
2283 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size, in writecache_status()
2284 (unsigned long long)wc->writeback_size); in writecache_status()
2287 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's', in writecache_status()
2288 wc->dev->name, wc->ssd_dev->name, wc->block_size); in writecache_status()
2290 if (wc->start_sector) in writecache_status()
2292 if (wc->high_wm_percent_set) in writecache_status()
2294 if (wc->low_wm_percent_set) in writecache_status()
2296 if (wc->max_writeback_jobs_set) in writecache_status()
2298 if (wc->autocommit_blocks_set) in writecache_status()
2300 if (wc->autocommit_time_set) in writecache_status()
2302 if (wc->writeback_fua_set) in writecache_status()
2306 if (wc->start_sector) in writecache_status()
2307 DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector); in writecache_status()
2308 if (wc->high_wm_percent_set) { in writecache_status()
2309 x = (uint64_t)wc->freelist_high_watermark * 100; in writecache_status()
2310 x += wc->n_blocks / 2; in writecache_status()
2311 do_div(x, (size_t)wc->n_blocks); in writecache_status()
2314 if (wc->low_wm_percent_set) { in writecache_status()
2315 x = (uint64_t)wc->freelist_low_watermark * 100; in writecache_status()
2316 x += wc->n_blocks / 2; in writecache_status()
2317 do_div(x, (size_t)wc->n_blocks); in writecache_status()
2320 if (wc->max_writeback_jobs_set) in writecache_status()
2321 DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs); in writecache_status()
2322 if (wc->autocommit_blocks_set) in writecache_status()
2323 DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks); in writecache_status()
2324 if (wc->autocommit_time_set) in writecache_status()
2325 DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies)); in writecache_status()
2326 if (wc->writeback_fua_set) in writecache_status()
2327 DMEMIT(" %sfua", wc->writeback_fua ? "" : "no"); in writecache_status()