// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>
#include <linux/delay.h>
#include "dm-io-tracker.h"

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL
#define PAUSE_WRITEBACK			(HZ * 3)

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

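/*
 * pmem_assign(dest, src) stores @src into persistent memory: @src is
 * evaluated once into a local temporary and written out with
 * memcpy_flushcache(), so the new value is flushed from the CPU cache
 * towards the persistent media. Without PMEM support it degenerates to a
 * plain assignment and durability is tracked by the SSD dirty bitmap
 * instead.
 */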
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

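/*
 * On-media metadata layout: a superblock header padded to 64 bytes (the
 * union with __le64 padding[8]), immediately followed by one 16-byte
 * wc_memory_entry per cache block.
 */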
struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[];
};

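/*
 * In-core descriptor of one cache block. On 64-bit architectures,
 * write_in_progress:1 and index:47 are packed as bitfields to keep this
 * (potentially very large) array of entries compact; 47 bits are still
 * plenty for indexing cache blocks.
 */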
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
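/*
 * In SSD mode the freelist is kept sorted: it is an rb-tree ordered by
 * entry address, which corresponds to cache-sector order, so consecutive
 * allocations tend to return adjacent cache sectors and
 * writecache_bio_copy_ssd() can build one large contiguous write (see the
 * expected_sector argument of writecache_pop_from_freelist()).
 */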
#define WC_MODE_SORT_FREELIST(wc)		(!WC_MODE_PMEM(wc))

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;
	unsigned long pause;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool max_age_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;
	bool cleaner_set:1;
	bool metadata_only:1;
	bool pause_set:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;
	unsigned max_age_value;
	unsigned pause_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_tracker iot;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;

	struct {
		unsigned long long reads;
		unsigned long long read_hits;
		unsigned long long writes;
		unsigned long long write_hits_uncommitted;
		unsigned long long write_hits_committed;
		unsigned long long writes_around;
		unsigned long long writes_allocate;
		unsigned long long writes_blocked_on_freelist;
		unsigned long long flushes;
		unsigned long long discards;
	} stats;
};

#define WB_LIST_INLINE		16

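/*
 * Per-bio state for PMEM writeback. The bio must remain the last member:
 * writeback bios are allocated from wc->bio_set (presumably with matching
 * front padding set up in the target constructor, which is outside this
 * excerpt) and writecache_writeback_endio() recovers the wrapper with
 * container_of(). Up to WB_LIST_INLINE entries are tracked without a
 * separate allocation.
 */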
struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					    "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
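/*
 * Map the cache device's persistent memory into the kernel address space
 * with dax_direct_access(). If DAX cannot return one virtually contiguous
 * mapping, fall back to collecting the individual pages and vmap()ing
 * them; memory_vmapped records this so persistent_memory_release() knows
 * to vunmap().
 */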
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

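/*
 * Record a fatal error. The cmpxchg() keeps only the first error that
 * occurred, so later failures neither overwrite the original errno nor
 * spam the log; freelist waiters are woken so they can notice the error
 * instead of sleeping forever.
 */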
#define writecache_error(wc, err, msg, arg...)				\
do {									\
	if (!cmpxchg(&(wc)->error, 0, err))				\
		DMERR(msg, ##arg);					\
	wake_up(&(wc)->freelist_wait);					\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

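/*
 * Write out all metadata regions marked in the dirty bitmap: scan for
 * runs of dirty bits, issue one asynchronous dm-io write per run, wait
 * for all of them, optionally wait for in-flight WRITE bios, then flush
 * the SSD's cache and clear the bitmap.
 */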
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		pmem_wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

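/*
 * Look up @block in the rb-tree of cached blocks. Without flags an exact
 * match is required. With WFE_RETURN_FOLLOWING a miss returns the entry
 * with the next higher original sector instead of NULL. When several
 * entries for the same block exist (an old committed copy plus newer
 * ones), WFE_LOWEST_SEQ selects the one with the lowest seq_count,
 * otherwise the highest wins.
 */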
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

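/*
 * Commit all uncommitted entries: flush each entry's metadata (and, in
 * PMEM mode, its data), commit that, then increment seq_count and persist
 * the superblock, which atomically promotes the entries to "committed".
 * Finally walk back through the LRU and free older committed copies that
 * the just-committed writes superseded.
 */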
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				if (!WC_MODE_PMEM(wc)) {
					writecache_wait_for_ios(wc, READ);
					writecache_wait_for_ios(wc, WRITE);
				}
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

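/*
 * Rebuild the in-core state after suspend/resume: re-read the metadata
 * from the SSD (PMEM mode only invalidates the CPU cache), reset the
 * rb-tree, LRU and freelist, and walk all entries. Uncommitted entries
 * are erased and freed; committed entries are inserted into the tree, and
 * when two committed copies of the same block are found, the one with the
 * lower seq_count is discarded.
 */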
static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev);

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
			      sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
				      sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	memset(&wc->stats, 0, sizeof wc->stats);
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "clear_stats"))
		r = process_clear_stats_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
{
	/*
	 * clflushopt performs better with block size 1024, 2048, 4096
	 * non-temporal stores perform better with block size 512
	 *
	 * block size   512             1024            2048            4096
	 * movnti       496 MB/s        642 MB/s        725 MB/s        744 MB/s
	 * clflushopt   373 MB/s        688 MB/s        1.1 GB/s        1.2 GB/s
	 *
	 * We see that movnti performs better for 512-byte blocks, and
	 * clflushopt performs better for 1024-byte and larger blocks. So, we
	 * prefer clflushopt for sizes >= 768.
	 *
	 * NOTE: this happens to be the case now (with dm-writecache's single
	 * threaded model) but re-evaluate this once memcpy_flushcache() is
	 * enabled to use movdir64b which might invalidate this performance
	 * advantage seen with cache-allocating-writes plus flushing.
	 */
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
	    likely(boot_cpu_data.x86_clflush_size == 64) &&
	    likely(size >= 768)) {
		do {
			memcpy((void *)dest, (void *)source, 64);
			clflushopt((void *)dest);
			dest += 64;
			source += 64;
			size -= 64;
		} while (size >= 64);
		return;
	}
#endif
	memcpy_flushcache(dest, source, size);
}

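/*
 * Copy one cache block between a bio and persistent memory, iterating
 * over the bio's segments. Reads go through copy_mc_to_kernel() so that a
 * machine check caused by bad persistent memory is reported as an I/O
 * error rather than crashing the kernel; writes go through
 * memcpy_flushcache_optimized() so the data is durable when the copy
 * returns.
 */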
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_local(&bv);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = copy_mc_to_kernel(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache_optimized(data, buf, size);
		}

		kunmap_local(buf);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			submit_bio_noacct(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

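/*
 * Verdict returned by the per-operation handlers, acted upon by the
 * switch at the end of writecache_map(): WC_MAP_SUBMIT ends the bio here,
 * WC_MAP_REMAP redirects it to the cache device, WC_MAP_REMAP_ORIGIN
 * redirects it to the origin device, WC_MAP_RETURN means the bio was
 * queued elsewhere (the flush thread), and WC_MAP_ERROR fails it.
 */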
1323 enum wc_map_op {
1324 	WC_MAP_SUBMIT,
1325 	WC_MAP_REMAP,
1326 	WC_MAP_REMAP_ORIGIN,
1327 	WC_MAP_RETURN,
1328 	WC_MAP_ERROR,
1329 };
1330 
writecache_map_remap_origin(struct dm_writecache * wc,struct bio * bio,struct wc_entry * e)1331 static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
1332 					struct wc_entry *e)
1333 {
1334 	if (e) {
1335 		sector_t next_boundary =
1336 			read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1337 		if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
1338 			dm_accept_partial_bio(bio, next_boundary);
1339 	}
1340 }
1341 
writecache_map_read(struct dm_writecache * wc,struct bio * bio)1342 static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
1343 {
1344 	enum wc_map_op map_op;
1345 	struct wc_entry *e;
1346 
1347 read_next_block:
1348 	wc->stats.reads++;
1349 	e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1350 	if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1351 		wc->stats.read_hits++;
1352 		if (WC_MODE_PMEM(wc)) {
1353 			bio_copy_block(wc, bio, memory_data(wc, e));
1354 			if (bio->bi_iter.bi_size)
1355 				goto read_next_block;
1356 			map_op = WC_MAP_SUBMIT;
1357 		} else {
1358 			dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1359 			bio_set_dev(bio, wc->ssd_dev->bdev);
1360 			bio->bi_iter.bi_sector = cache_sector(wc, e);
1361 			if (!writecache_entry_is_committed(wc, e))
1362 				writecache_wait_for_ios(wc, WRITE);
1363 			map_op = WC_MAP_REMAP;
1364 		}
1365 	} else {
1366 		writecache_map_remap_origin(wc, bio, e);
1367 		wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
1368 		map_op = WC_MAP_REMAP_ORIGIN;
1369 	}
1370 
1371 	return map_op;
1372 }
1373 
writecache_bio_copy_ssd(struct dm_writecache * wc,struct bio * bio,struct wc_entry * e,bool search_used)1374 static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
1375 				    struct wc_entry *e, bool search_used)
1376 {
1377 	unsigned bio_size = wc->block_size;
1378 	sector_t start_cache_sec = cache_sector(wc, e);
1379 	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
1380 
1381 	while (bio_size < bio->bi_iter.bi_size) {
1382 		if (!search_used) {
1383 			struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
1384 			if (!f)
1385 				break;
1386 			write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1387 							(bio_size >> SECTOR_SHIFT), wc->seq_count);
1388 			writecache_insert_entry(wc, f);
1389 			wc->uncommitted_blocks++;
1390 		} else {
1391 			struct wc_entry *f;
1392 			struct rb_node *next = rb_next(&e->rb_node);
1393 			if (!next)
1394 				break;
1395 			f = container_of(next, struct wc_entry, rb_node);
1396 			if (f != e + 1)
1397 				break;
1398 			if (read_original_sector(wc, f) !=
1399 			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1400 				break;
1401 			if (unlikely(f->write_in_progress))
1402 				break;
1403 			if (writecache_entry_is_committed(wc, f))
1404 				wc->overwrote_committed = true;
1405 			e = f;
1406 		}
1407 		bio_size += wc->block_size;
1408 		current_cache_sec += wc->block_size >> SECTOR_SHIFT;
1409 	}
1410 
1411 	bio_set_dev(bio, wc->ssd_dev->bdev);
1412 	bio->bi_iter.bi_sector = start_cache_sec;
1413 	dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1414 
1415 	wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1416 	wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
1417 
1418 	if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
1419 		wc->uncommitted_blocks = 0;
1420 		queue_work(wc->writeback_wq, &wc->flush_work);
1421 	} else {
1422 		writecache_schedule_autocommit(wc);
1423 	}
1424 }
1425 
writecache_map_write(struct dm_writecache * wc,struct bio * bio)1426 static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
1427 {
1428 	struct wc_entry *e;
1429 
1430 	do {
1431 		bool found_entry = false;
1432 		bool search_used = false;
1433 		if (writecache_has_error(wc)) {
1434 			wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1435 			return WC_MAP_ERROR;
1436 		}
1437 		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1438 		if (e) {
1439 			if (!writecache_entry_is_committed(wc, e)) {
1440 				wc->stats.write_hits_uncommitted++;
1441 				search_used = true;
1442 				goto bio_copy;
1443 			}
1444 			wc->stats.write_hits_committed++;
1445 			if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
1446 				wc->overwrote_committed = true;
1447 				search_used = true;
1448 				goto bio_copy;
1449 			}
1450 			found_entry = true;
1451 		} else {
1452 			if (unlikely(wc->cleaner) ||
1453 			    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
1454 				goto direct_write;
1455 		}
1456 		e = writecache_pop_from_freelist(wc, (sector_t)-1);
1457 		if (unlikely(!e)) {
1458 			if (!WC_MODE_PMEM(wc) && !found_entry) {
1459 direct_write:
1460 				e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1461 				writecache_map_remap_origin(wc, bio, e);
1462 				wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
1463 				wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1464 				return WC_MAP_REMAP_ORIGIN;
1465 			}
1466 			wc->stats.writes_blocked_on_freelist++;
1467 			writecache_wait_on_freelist(wc);
1468 			continue;
1469 		}
1470 		write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1471 		writecache_insert_entry(wc, e);
1472 		wc->uncommitted_blocks++;
1473 		wc->stats.writes_allocate++;
1474 bio_copy:
1475 		if (WC_MODE_PMEM(wc)) {
1476 			bio_copy_block(wc, bio, memory_data(wc, e));
1477 			wc->stats.writes++;
1478 		} else {
1479 			writecache_bio_copy_ssd(wc, bio, e, search_used);
1480 			return WC_MAP_REMAP;
1481 		}
1482 	} while (bio->bi_iter.bi_size);
1483 
1484 	if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
1485 		writecache_flush(wc);
1486 	else
1487 		writecache_schedule_autocommit(wc);
1488 
1489 	return WC_MAP_SUBMIT;
1490 }
1491 
writecache_map_flush(struct dm_writecache * wc,struct bio * bio)1492 static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio *bio)
1493 {
1494 	if (writecache_has_error(wc))
1495 		return WC_MAP_ERROR;
1496 
1497 	if (WC_MODE_PMEM(wc)) {
1498 		wc->stats.flushes++;
1499 		writecache_flush(wc);
1500 		if (writecache_has_error(wc))
1501 			return WC_MAP_ERROR;
1502 		else if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
1503 			return WC_MAP_REMAP_ORIGIN;
1504 		return WC_MAP_SUBMIT;
1505 	}
1506 	/* SSD: */
1507 	if (dm_bio_get_target_bio_nr(bio))
1508 		return WC_MAP_REMAP_ORIGIN;
1509 	wc->stats.flushes++;
1510 	writecache_offload_bio(wc, bio);
1511 	return WC_MAP_RETURN;
1512 }
1513 
writecache_map_discard(struct dm_writecache * wc,struct bio * bio)1514 static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
1515 {
1516 	wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits;
1517 
1518 	if (writecache_has_error(wc))
1519 		return WC_MAP_ERROR;
1520 
1521 	if (WC_MODE_PMEM(wc)) {
1522 		writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1523 		return WC_MAP_REMAP_ORIGIN;
1524 	}
1525 	/* SSD: */
1526 	writecache_offload_bio(wc, bio);
1527 	return WC_MAP_RETURN;
1528 }
1529 
writecache_map(struct dm_target * ti,struct bio * bio)1530 static int writecache_map(struct dm_target *ti, struct bio *bio)
1531 {
1532 	struct dm_writecache *wc = ti->private;
1533 	enum wc_map_op map_op;
1534 
1535 	bio->bi_private = NULL;
1536 
1537 	wc_lock(wc);
1538 
1539 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1540 		map_op = writecache_map_flush(wc, bio);
1541 		goto done;
1542 	}
1543 
1544 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1545 
1546 	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1547 				(wc->block_size / 512 - 1)) != 0)) {
1548 		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
1549 		      (unsigned long long)bio->bi_iter.bi_sector,
1550 		      bio->bi_iter.bi_size, wc->block_size);
1551 		map_op = WC_MAP_ERROR;
1552 		goto done;
1553 	}
1554 
1555 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1556 		map_op = writecache_map_discard(wc, bio);
1557 		goto done;
1558 	}
1559 
1560 	if (bio_data_dir(bio) == READ)
1561 		map_op = writecache_map_read(wc, bio);
1562 	else
1563 		map_op = writecache_map_write(wc, bio);
1564 done:
1565 	switch (map_op) {
1566 	case WC_MAP_REMAP_ORIGIN:
1567 		if (likely(wc->pause != 0)) {
1568 			if (bio_op(bio) == REQ_OP_WRITE) {
1569 				dm_iot_io_begin(&wc->iot, 1);
1570 				bio->bi_private = (void *)2;
1571 			}
1572 		}
1573 		bio_set_dev(bio, wc->dev->bdev);
1574 		wc_unlock(wc);
1575 		return DM_MAPIO_REMAPPED;
1576 
1577 	case WC_MAP_REMAP:
1578 		/* make sure that writecache_end_io decrements bio_in_progress: */
1579 		bio->bi_private = (void *)1;
1580 		atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1581 		wc_unlock(wc);
1582 		return DM_MAPIO_REMAPPED;
1583 
1584 	case WC_MAP_SUBMIT:
1585 		wc_unlock(wc);
1586 		bio_endio(bio);
1587 		return DM_MAPIO_SUBMITTED;
1588 
1589 	case WC_MAP_RETURN:
1590 		wc_unlock(wc);
1591 		return DM_MAPIO_SUBMITTED;
1592 
1593 	case WC_MAP_ERROR:
1594 		wc_unlock(wc);
1595 		bio_io_error(bio);
1596 		return DM_MAPIO_SUBMITTED;
1597 
1598 	default:
1599 		BUG();
1600 		return -1;
1601 	}
1602 }
1603 
writecache_end_io(struct dm_target * ti,struct bio * bio,blk_status_t * status)1604 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1605 {
1606 	struct dm_writecache *wc = ti->private;
1607 
1608 	if (bio->bi_private == (void *)1) {
1609 		int dir = bio_data_dir(bio);
1610 		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1611 			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1612 				wake_up(&wc->bio_in_progress_wait[dir]);
1613 	} else if (bio->bi_private == (void *)2) {
1614 		dm_iot_io_end(&wc->iot, 1);
1615 	}
1616 	return 0;
1617 }
1618 
writecache_iterate_devices(struct dm_target * ti,iterate_devices_callout_fn fn,void * data)1619 static int writecache_iterate_devices(struct dm_target *ti,
1620 				      iterate_devices_callout_fn fn, void *data)
1621 {
1622 	struct dm_writecache *wc = ti->private;
1623 
1624 	return fn(ti, wc->dev, 0, ti->len, data);
1625 }
1626 
writecache_io_hints(struct dm_target * ti,struct queue_limits * limits)1627 static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1628 {
1629 	struct dm_writecache *wc = ti->private;
1630 
1631 	if (limits->logical_block_size < wc->block_size)
1632 		limits->logical_block_size = wc->block_size;
1633 
1634 	if (limits->physical_block_size < wc->block_size)
1635 		limits->physical_block_size = wc->block_size;
1636 
1637 	if (limits->io_min < wc->block_size)
1638 		limits->io_min = wc->block_size;
1639 }
1640 
1641 
writecache_writeback_endio(struct bio * bio)1642 static void writecache_writeback_endio(struct bio *bio)
1643 {
1644 	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1645 	struct dm_writecache *wc = wb->wc;
1646 	unsigned long flags;
1647 
1648 	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1649 	if (unlikely(list_empty(&wc->endio_list)))
1650 		wake_up_process(wc->endio_thread);
1651 	list_add_tail(&wb->endio_entry, &wc->endio_list);
1652 	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1653 }
1654 
writecache_copy_endio(int read_err,unsigned long write_err,void * ptr)1655 static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1656 {
1657 	struct copy_struct *c = ptr;
1658 	struct dm_writecache *wc = c->wc;
1659 
1660 	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1661 
1662 	raw_spin_lock_irq(&wc->endio_list_lock);
1663 	if (unlikely(list_empty(&wc->endio_list)))
1664 		wake_up_process(wc->endio_thread);
1665 	list_add_tail(&c->endio_entry, &wc->endio_list);
1666 	raw_spin_unlock_irq(&wc->endio_list_lock);
1667 }
1668 
__writecache_endio_pmem(struct dm_writecache * wc,struct list_head * list)1669 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1670 {
1671 	unsigned i;
1672 	struct writeback_struct *wb;
1673 	struct wc_entry *e;
1674 	unsigned long n_walked = 0;
1675 
1676 	do {
1677 		wb = list_entry(list->next, struct writeback_struct, endio_entry);
1678 		list_del(&wb->endio_entry);
1679 
1680 		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1681 			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1682 					"write error %d", wb->bio.bi_status);
1683 		i = 0;
1684 		do {
1685 			e = wb->wc_list[i];
1686 			BUG_ON(!e->write_in_progress);
1687 			e->write_in_progress = false;
1688 			INIT_LIST_HEAD(&e->lru);
1689 			if (!writecache_has_error(wc))
1690 				writecache_free_entry(wc, e);
1691 			BUG_ON(!wc->writeback_size);
1692 			wc->writeback_size--;
1693 			n_walked++;
1694 			if (unlikely(n_walked >= ENDIO_LATENCY)) {
1695 				writecache_commit_flushed(wc, false);
1696 				wc_unlock(wc);
1697 				wc_lock(wc);
1698 				n_walked = 0;
1699 			}
1700 		} while (++i < wb->wc_list_n);
1701 
1702 		if (wb->wc_list != wb->wc_list_inline)
1703 			kfree(wb->wc_list);
1704 		bio_put(&wb->bio);
1705 	} while (!list_empty(list));
1706 }
1707 
__writecache_endio_ssd(struct dm_writecache * wc,struct list_head * list)1708 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1709 {
1710 	struct copy_struct *c;
1711 	struct wc_entry *e;
1712 
1713 	do {
1714 		c = list_entry(list->next, struct copy_struct, endio_entry);
1715 		list_del(&c->endio_entry);
1716 
1717 		if (unlikely(c->error))
1718 			writecache_error(wc, c->error, "copy error");
1719 
1720 		e = c->e;
1721 		do {
1722 			BUG_ON(!e->write_in_progress);
1723 			e->write_in_progress = false;
1724 			INIT_LIST_HEAD(&e->lru);
1725 			if (!writecache_has_error(wc))
1726 				writecache_free_entry(wc, e);
1727 
1728 			BUG_ON(!wc->writeback_size);
1729 			wc->writeback_size--;
1730 			e++;
1731 		} while (--c->n_entries);
1732 		mempool_free(c, &wc->copy_pool);
1733 	} while (!list_empty(list));
1734 }
1735 
writecache_endio_thread(void * data)1736 static int writecache_endio_thread(void *data)
1737 {
1738 	struct dm_writecache *wc = data;
1739 
1740 	while (1) {
1741 		struct list_head list;
1742 
1743 		raw_spin_lock_irq(&wc->endio_list_lock);
1744 		if (!list_empty(&wc->endio_list))
1745 			goto pop_from_list;
1746 		set_current_state(TASK_INTERRUPTIBLE);
1747 		raw_spin_unlock_irq(&wc->endio_list_lock);
1748 
1749 		if (unlikely(kthread_should_stop())) {
1750 			set_current_state(TASK_RUNNING);
1751 			break;
1752 		}
1753 
1754 		schedule();
1755 
1756 		continue;
1757 
1758 pop_from_list:
1759 		list = wc->endio_list;
1760 		list.next->prev = list.prev->next = &list;
1761 		INIT_LIST_HEAD(&wc->endio_list);
1762 		raw_spin_unlock_irq(&wc->endio_list_lock);
1763 
1764 		if (!WC_MODE_FUA(wc))
1765 			writecache_disk_flush(wc, wc->dev);
1766 
1767 		wc_lock(wc);
1768 
1769 		if (WC_MODE_PMEM(wc)) {
1770 			__writecache_endio_pmem(wc, &list);
1771 		} else {
1772 			__writecache_endio_ssd(wc, &list);
1773 			writecache_wait_for_ios(wc, READ);
1774 		}
1775 
1776 		writecache_commit_flushed(wc, false);
1777 
1778 		wc_unlock(wc);
1779 	}
1780 
1781 	return 0;
1782 }
1783 
wc_add_block(struct writeback_struct * wb,struct wc_entry * e)1784 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
1785 {
1786 	struct dm_writecache *wc = wb->wc;
1787 	unsigned block_size = wc->block_size;
1788 	void *address = memory_data(wc, e);
1789 
1790 	persistent_memory_flush_cache(address, block_size);
1791 
1792 	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
1793 		return true;
1794 
1795 	return bio_add_page(&wb->bio, persistent_memory_page(address),
1796 			    block_size, persistent_memory_page_offset(address)) != 0;
1797 }
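
/*
 * Returning true without adding the page is intentional: once the
 * bio already reaches the end of the origin device, further blocks
 * are claimed as handled but not queued, and a bio that ends up with
 * zero sectors is completed with BLK_STS_OK by the caller rather
 * than submitted. The entries are still freed in the endio path.
 */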
1798 
1799 struct writeback_list {
1800 	struct list_head list;
1801 	size_t size;
1802 };
1803 
1804 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1805 {
1806 	if (unlikely(wc->max_writeback_jobs)) {
1807 		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1808 			wc_lock(wc);
1809 			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1810 				writecache_wait_on_freelist(wc);
1811 			wc_unlock(wc);
1812 		}
1813 	}
1814 	cond_resched();
1815 }
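
/*
 * The unlocked READ_ONCE() test is only an optimistic fast path; the
 * count is re-checked under wc->lock before sleeping, so a racy read
 * costs at most one needless lock acquisition. wbl->size is
 * subtracted because entries on the local writeback list were already
 * counted in wc->writeback_size but have not been submitted yet.
 */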
1816 
1817 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1818 {
1819 	struct wc_entry *e, *f;
1820 	struct bio *bio;
1821 	struct writeback_struct *wb;
1822 	unsigned max_pages;
1823 
1824 	while (wbl->size) {
1825 		wbl->size--;
1826 		e = container_of(wbl->list.prev, struct wc_entry, lru);
1827 		list_del(&e->lru);
1828 
1829 		max_pages = e->wc_list_contiguous;
1830 
1831 		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
1832 		wb = container_of(bio, struct writeback_struct, bio);
1833 		wb->wc = wc;
1834 		bio->bi_end_io = writecache_writeback_endio;
1835 		bio_set_dev(bio, wc->dev->bdev);
1836 		bio->bi_iter.bi_sector = read_original_sector(wc, e);
1837 		if (max_pages <= WB_LIST_INLINE ||
1838 		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1839 							   GFP_NOIO | __GFP_NORETRY |
1840 							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1841 			wb->wc_list = wb->wc_list_inline;
1842 			max_pages = WB_LIST_INLINE;
1843 		}
1844 
1845 		BUG_ON(!wc_add_block(wb, e));
1846 
1847 		wb->wc_list[0] = e;
1848 		wb->wc_list_n = 1;
1849 
1850 		while (wbl->size && wb->wc_list_n < max_pages) {
1851 			f = container_of(wbl->list.prev, struct wc_entry, lru);
1852 			if (read_original_sector(wc, f) !=
1853 			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1854 				break;
1855 			if (!wc_add_block(wb, f))
1856 				break;
1857 			wbl->size--;
1858 			list_del(&f->lru);
1859 			wb->wc_list[wb->wc_list_n++] = f;
1860 			e = f;
1861 		}
1862 		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
1863 		if (writecache_has_error(wc)) {
1864 			bio->bi_status = BLK_STS_IOERR;
1865 			bio_endio(bio);
1866 		} else if (unlikely(!bio_sectors(bio))) {
1867 			bio->bi_status = BLK_STS_OK;
1868 			bio_endio(bio);
1869 		} else {
1870 			submit_bio(bio);
1871 		}
1872 
1873 		__writeback_throttle(wc, wbl);
1874 	}
1875 }
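
/*
 * Illustration of the coalescing above: with 4096-byte blocks
 * (8 sectors each), entries whose original sectors run N, N+8,
 * N+16, ... are folded into a single writeback bio of up to
 * e->wc_list_contiguous pages (or WB_LIST_INLINE if the pointer
 * array cannot be allocated); the run ends early once bio_add_page()
 * refuses another page.
 */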
1876 
1877 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1878 {
1879 	struct wc_entry *e, *f;
1880 	struct dm_io_region from, to;
1881 	struct copy_struct *c;
1882 
1883 	while (wbl->size) {
1884 		unsigned n_sectors;
1885 
1886 		wbl->size--;
1887 		e = container_of(wbl->list.prev, struct wc_entry, lru);
1888 		list_del(&e->lru);
1889 
1890 		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1891 
1892 		from.bdev = wc->ssd_dev->bdev;
1893 		from.sector = cache_sector(wc, e);
1894 		from.count = n_sectors;
1895 		to.bdev = wc->dev->bdev;
1896 		to.sector = read_original_sector(wc, e);
1897 		to.count = n_sectors;
1898 
1899 		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1900 		c->wc = wc;
1901 		c->e = e;
1902 		c->n_entries = e->wc_list_contiguous;
1903 
1904 		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1905 			wbl->size--;
1906 			f = container_of(wbl->list.prev, struct wc_entry, lru);
1907 			BUG_ON(f != e + 1);
1908 			list_del(&f->lru);
1909 			e = f;
1910 		}
1911 
1912 		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
1913 			if (to.sector >= wc->data_device_sectors) {
1914 				writecache_copy_endio(0, 0, c);
1915 				continue;
1916 			}
1917 			from.count = to.count = wc->data_device_sectors - to.sector;
1918 		}
1919 
1920 		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1921 
1922 		__writeback_throttle(wc, wbl);
1923 	}
1924 }
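
/*
 * If a contiguous run starts at or extends past the end of the
 * origin device, it is completed immediately via
 * writecache_copy_endio(0, 0, c) or clamped to the device size, so
 * dm_kcopyd_copy() never writes beyond the origin; this can occur,
 * e.g., when cached blocks outlive a shrink of the origin device.
 */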
1925 
1926 static void writecache_writeback(struct work_struct *work)
1927 {
1928 	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1929 	struct blk_plug plug;
1930 	struct wc_entry *f, *g, *e = NULL;
1931 	struct rb_node *node, *next_node;
1932 	struct list_head skipped;
1933 	struct writeback_list wbl;
1934 	unsigned long n_walked;
1935 
1936 	if (!WC_MODE_PMEM(wc)) {
1937 		/* Wait for any active kcopyd work on behalf of ssd writeback */
1938 		dm_kcopyd_client_flush(wc->dm_kcopyd);
1939 	}
1940 
1941 	if (likely(wc->pause != 0)) {
1942 		while (1) {
1943 			unsigned long idle;
1944 			if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
1945 			    unlikely(dm_suspended(wc->ti)))
1946 				break;
1947 			idle = dm_iot_idle_time(&wc->iot);
1948 			if (idle >= wc->pause)
1949 				break;
1950 			idle = wc->pause - idle;
1951 			if (idle > HZ)
1952 				idle = HZ;
1953 			schedule_timeout_idle(idle);
1954 		}
1955 	}
1956 
1957 	wc_lock(wc);
1958 restart:
1959 	if (writecache_has_error(wc)) {
1960 		wc_unlock(wc);
1961 		return;
1962 	}
1963 
1964 	if (unlikely(wc->writeback_all)) {
1965 		if (writecache_wait_for_writeback(wc))
1966 			goto restart;
1967 	}
1968 
1969 	if (wc->overwrote_committed) {
1970 		writecache_wait_for_ios(wc, WRITE);
1971 	}
1972 
1973 	n_walked = 0;
1974 	INIT_LIST_HEAD(&skipped);
1975 	INIT_LIST_HEAD(&wbl.list);
1976 	wbl.size = 0;
1977 	while (!list_empty(&wc->lru) &&
1978 	       (wc->writeback_all ||
1979 		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1980 		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1981 		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1982 
1983 		n_walked++;
1984 		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1985 		    likely(!wc->writeback_all)) {
1986 			if (likely(!dm_suspended(wc->ti)))
1987 				queue_work(wc->writeback_wq, &wc->writeback_work);
1988 			break;
1989 		}
1990 
1991 		if (unlikely(wc->writeback_all)) {
1992 			if (unlikely(!e)) {
1993 				writecache_flush(wc);
1994 				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1995 			} else
1996 				e = g;
1997 		} else
1998 			e = container_of(wc->lru.prev, struct wc_entry, lru);
1999 		BUG_ON(e->write_in_progress);
2000 		if (unlikely(!writecache_entry_is_committed(wc, e))) {
2001 			writecache_flush(wc);
2002 		}
2003 		node = rb_prev(&e->rb_node);
2004 		if (node) {
2005 			f = container_of(node, struct wc_entry, rb_node);
2006 			if (unlikely(read_original_sector(wc, f) ==
2007 				     read_original_sector(wc, e))) {
2008 				BUG_ON(!f->write_in_progress);
2009 				list_move(&e->lru, &skipped);
2010 				cond_resched();
2011 				continue;
2012 			}
2013 		}
2014 		wc->writeback_size++;
2015 		list_move(&e->lru, &wbl.list);
2016 		wbl.size++;
2017 		e->write_in_progress = true;
2018 		e->wc_list_contiguous = 1;
2019 
2020 		f = e;
2021 
2022 		while (1) {
2023 			next_node = rb_next(&f->rb_node);
2024 			if (unlikely(!next_node))
2025 				break;
2026 			g = container_of(next_node, struct wc_entry, rb_node);
2027 			if (unlikely(read_original_sector(wc, g) ==
2028 			    read_original_sector(wc, f))) {
2029 				f = g;
2030 				continue;
2031 			}
2032 			if (read_original_sector(wc, g) !=
2033 			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
2034 				break;
2035 			if (unlikely(g->write_in_progress))
2036 				break;
2037 			if (unlikely(!writecache_entry_is_committed(wc, g)))
2038 				break;
2039 
2040 			if (!WC_MODE_PMEM(wc)) {
2041 				if (g != f + 1)
2042 					break;
2043 			}
2044 
2045 			n_walked++;
2046 			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
2047 			//	break;
2048 
2049 			wc->writeback_size++;
2050 			list_move(&g->lru, &wbl.list);
2051 			wbl.size++;
2052 			g->write_in_progress = true;
2053 			g->wc_list_contiguous = BIO_MAX_VECS;
2054 			f = g;
2055 			e->wc_list_contiguous++;
2056 			if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) {
2057 				if (unlikely(wc->writeback_all)) {
2058 					next_node = rb_next(&f->rb_node);
2059 					if (likely(next_node))
2060 						g = container_of(next_node, struct wc_entry, rb_node);
2061 				}
2062 				break;
2063 			}
2064 		}
2065 		cond_resched();
2066 	}
2067 
2068 	if (!list_empty(&skipped)) {
2069 		list_splice_tail(&skipped, &wc->lru);
2070 		/*
2071 		 * If we made no progress, we must wait until some
2072 		 * writeback finishes to avoid burning CPU in a loop
2073 		 */
2074 		if (unlikely(!wbl.size))
2075 			writecache_wait_for_writeback(wc);
2076 	}
2077 
2078 	wc_unlock(wc);
2079 
2080 	blk_start_plug(&plug);
2081 
2082 	if (WC_MODE_PMEM(wc))
2083 		__writecache_writeback_pmem(wc, &wbl);
2084 	else
2085 		__writecache_writeback_ssd(wc, &wbl);
2086 
2087 	blk_finish_plug(&plug);
2088 
2089 	if (unlikely(wc->writeback_all)) {
2090 		wc_lock(wc);
2091 		while (writecache_wait_for_writeback(wc));
2092 		wc_unlock(wc);
2093 	}
2094 }
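
/*
 * The loop condition above encodes the three writeback triggers:
 * writeback_all (set e.g. by the "flush" message), the freelist plus
 * blocks already under writeback dropping to the low watermark, and
 * the oldest LRU entry coming within 1/MAX_AGE_DIV of max_age. In
 * SSD mode, wc->pause additionally delays the worker until the
 * device has been idle, unless the cleaner is active, a full
 * writeback was requested, or the target is suspending.
 */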
2095 
2096 static int calculate_memory_size(uint64_t device_size, unsigned block_size,
2097 				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
2098 {
2099 	uint64_t n_blocks, offset;
2100 	struct wc_entry e;
2101 
2102 	n_blocks = device_size;
2103 	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
2104 
2105 	while (1) {
2106 		if (!n_blocks)
2107 			return -ENOSPC;
2108 		/* Verify that the offset of entries[n_blocks] below won't overflow a size_t */
2109 		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
2110 				 sizeof(struct wc_memory_entry)))
2111 			return -EFBIG;
2112 		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
2113 		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
2114 		if (offset + n_blocks * block_size <= device_size)
2115 			break;
2116 		n_blocks--;
2117 	}
2118 
2119 	/* check that n_blocks fits in the "index" bit field of struct wc_entry */
2120 	e.index = n_blocks;
2121 	if (e.index != n_blocks)
2122 		return -EFBIG;
2123 
2124 	if (n_blocks_p)
2125 		*n_blocks_p = n_blocks;
2126 	if (n_metadata_blocks_p)
2127 		*n_metadata_blocks_p = offset >> __ffs(block_size);
2128 	return 0;
2129 }
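
/*
 * Worked example (for illustration): device_size = 1 GiB,
 * block_size = 4096. Each block needs a 16-byte wc_memory_entry, so
 * the first estimate is n_blocks = 2^30 / 4112 = 261123. The
 * metadata then occupies 64 + 16 * 261123 = 4178032 bytes, rounded
 * up to 4182016 (1021 blocks), and 4182016 + 261123 * 4096 is
 * exactly 2^30, so the loop exits on the first pass with 261123 data
 * blocks and 1021 metadata blocks.
 */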
2130 
2131 static int init_memory(struct dm_writecache *wc)
2132 {
2133 	size_t b;
2134 	int r;
2135 
2136 	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
2137 	if (r)
2138 		return r;
2139 
2140 	r = writecache_alloc_entries(wc);
2141 	if (r)
2142 		return r;
2143 
2144 	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
2145 		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
2146 	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
2147 	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
2148 	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
2149 	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
2150 
2151 	for (b = 0; b < wc->n_blocks; b++) {
2152 		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
2153 		cond_resched();
2154 	}
2155 
2156 	writecache_flush_all_metadata(wc);
2157 	writecache_commit_flushed(wc, false);
2158 	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
2159 	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
2160 	writecache_commit_flushed(wc, false);
2161 
2162 	return 0;
2163 }
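
/*
 * The magic is deliberately written and flushed last: if
 * initialization is interrupted, the superblock can never carry a
 * valid magic over a half-written layout. The constructor
 * auto-initializes only when both magic and version read as zero,
 * i.e. on a freshly zeroed cache device.
 */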
2164 
2165 static void writecache_dtr(struct dm_target *ti)
2166 {
2167 	struct dm_writecache *wc = ti->private;
2168 
2169 	if (!wc)
2170 		return;
2171 
2172 	if (wc->endio_thread)
2173 		kthread_stop(wc->endio_thread);
2174 
2175 	if (wc->flush_thread)
2176 		kthread_stop(wc->flush_thread);
2177 
2178 	bioset_exit(&wc->bio_set);
2179 
2180 	mempool_exit(&wc->copy_pool);
2181 
2182 	if (wc->writeback_wq)
2183 		destroy_workqueue(wc->writeback_wq);
2184 
2185 	if (wc->dev)
2186 		dm_put_device(ti, wc->dev);
2187 
2188 	if (wc->ssd_dev)
2189 		dm_put_device(ti, wc->ssd_dev);
2190 
2191 	vfree(wc->entries);
2192 
2193 	if (wc->memory_map) {
2194 		if (WC_MODE_PMEM(wc))
2195 			persistent_memory_release(wc);
2196 		else
2197 			vfree(wc->memory_map);
2198 	}
2199 
2200 	if (wc->dm_kcopyd)
2201 		dm_kcopyd_client_destroy(wc->dm_kcopyd);
2202 
2203 	if (wc->dm_io)
2204 		dm_io_client_destroy(wc->dm_io);
2205 
2206 	vfree(wc->dirty_bitmap);
2207 
2208 	kfree(wc);
2209 }
2210 
2211 static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2212 {
2213 	struct dm_writecache *wc;
2214 	struct dm_arg_set as;
2215 	const char *string;
2216 	unsigned opt_params;
2217 	size_t offset, data_size;
2218 	int i, r;
2219 	char dummy;
2220 	int high_wm_percent = HIGH_WATERMARK;
2221 	int low_wm_percent = LOW_WATERMARK;
2222 	uint64_t x;
2223 	struct wc_memory_superblock s;
2224 
2225 	static struct dm_arg _args[] = {
2226 		{0, 18, "Invalid number of feature args"},
2227 	};
2228 
2229 	as.argc = argc;
2230 	as.argv = argv;
2231 
2232 	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
2233 	if (!wc) {
2234 		ti->error = "Cannot allocate writecache structure";
2235 		r = -ENOMEM;
2236 		goto bad;
2237 	}
2238 	ti->private = wc;
2239 	wc->ti = ti;
2240 
2241 	mutex_init(&wc->lock);
2242 	wc->max_age = MAX_AGE_UNSPECIFIED;
2243 	writecache_poison_lists(wc);
2244 	init_waitqueue_head(&wc->freelist_wait);
2245 	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
2246 	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
2247 
2248 	for (i = 0; i < 2; i++) {
2249 		atomic_set(&wc->bio_in_progress[i], 0);
2250 		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
2251 	}
2252 
2253 	wc->dm_io = dm_io_client_create();
2254 	if (IS_ERR(wc->dm_io)) {
2255 		r = PTR_ERR(wc->dm_io);
2256 		ti->error = "Unable to allocate dm-io client";
2257 		wc->dm_io = NULL;
2258 		goto bad;
2259 	}
2260 
2261 	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2262 	if (!wc->writeback_wq) {
2263 		r = -ENOMEM;
2264 		ti->error = "Could not allocate writeback workqueue";
2265 		goto bad;
2266 	}
2267 	INIT_WORK(&wc->writeback_work, writecache_writeback);
2268 	INIT_WORK(&wc->flush_work, writecache_flush_work);
2269 
2270 	dm_iot_init(&wc->iot);
2271 
2272 	raw_spin_lock_init(&wc->endio_list_lock);
2273 	INIT_LIST_HEAD(&wc->endio_list);
2274 	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
2275 	if (IS_ERR(wc->endio_thread)) {
2276 		r = PTR_ERR(wc->endio_thread);
2277 		wc->endio_thread = NULL;
2278 		ti->error = "Couldn't spawn endio thread";
2279 		goto bad;
2280 	}
2281 	wake_up_process(wc->endio_thread);
2282 
2283 	/*
2284 	 * Parse the mode (pmem or ssd)
2285 	 */
2286 	string = dm_shift_arg(&as);
2287 	if (!string)
2288 		goto bad_arguments;
2289 
2290 	if (!strcasecmp(string, "s")) {
2291 		wc->pmem_mode = false;
2292 	} else if (!strcasecmp(string, "p")) {
2293 #ifdef DM_WRITECACHE_HAS_PMEM
2294 		wc->pmem_mode = true;
2295 		wc->writeback_fua = true;
2296 #else
2297 		/*
2298 		 * If the architecture doesn't support persistent memory or
2299 		 * the kernel doesn't support any DAX drivers, this driver can
2300 		 * only be used in SSD-only mode.
2301 		 */
2302 		r = -EOPNOTSUPP;
2303 		ti->error = "Persistent memory or DAX not supported on this system";
2304 		goto bad;
2305 #endif
2306 	} else {
2307 		goto bad_arguments;
2308 	}
2309 
2310 	if (WC_MODE_PMEM(wc)) {
2311 		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2312 				offsetof(struct writeback_struct, bio),
2313 				BIOSET_NEED_BVECS);
2314 		if (r) {
2315 			ti->error = "Could not allocate bio set";
2316 			goto bad;
2317 		}
2318 	} else {
2319 		wc->pause = PAUSE_WRITEBACK;
2320 		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2321 		if (r) {
2322 			ti->error = "Could not allocate mempool";
2323 			goto bad;
2324 		}
2325 	}
2326 
2327 	/*
2328 	 * Parse the origin data device
2329 	 */
2330 	string = dm_shift_arg(&as);
2331 	if (!string)
2332 		goto bad_arguments;
2333 	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2334 	if (r) {
2335 		ti->error = "Origin data device lookup failed";
2336 		goto bad;
2337 	}
2338 
2339 	/*
2340 	 * Parse cache data device (be it pmem or ssd)
2341 	 */
2342 	string = dm_shift_arg(&as);
2343 	if (!string)
2344 		goto bad_arguments;
2345 
2346 	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2347 	if (r) {
2348 		ti->error = "Cache data device lookup failed";
2349 		goto bad;
2350 	}
2351 	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
2352 
2353 	/*
2354 	 * Parse the cache block size
2355 	 */
2356 	string = dm_shift_arg(&as);
2357 	if (!string)
2358 		goto bad_arguments;
2359 	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2360 	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2361 	    (wc->block_size & (wc->block_size - 1))) {
2362 		r = -EINVAL;
2363 		ti->error = "Invalid block size";
2364 		goto bad;
2365 	}
2366 	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2367 	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2368 		r = -EINVAL;
2369 		ti->error = "Block size is smaller than device logical block size";
2370 		goto bad;
2371 	}
2372 	wc->block_size_bits = __ffs(wc->block_size);
2373 
2374 	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2375 	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2376 	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2377 
2378 	/*
2379 	 * Parse optional arguments
2380 	 */
2381 	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2382 	if (r)
2383 		goto bad;
2384 
2385 	while (opt_params) {
2386 		string = dm_shift_arg(&as), opt_params--;
2387 		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2388 			unsigned long long start_sector;
2389 			string = dm_shift_arg(&as), opt_params--;
2390 			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2391 				goto invalid_optional;
2392 			wc->start_sector = start_sector;
2393 			wc->start_sector_set = true;
2394 			if (wc->start_sector != start_sector ||
2395 			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2396 				goto invalid_optional;
2397 		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2398 			string = dm_shift_arg(&as), opt_params--;
2399 			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2400 				goto invalid_optional;
2401 			if (high_wm_percent < 0 || high_wm_percent > 100)
2402 				goto invalid_optional;
2403 			wc->high_wm_percent_value = high_wm_percent;
2404 			wc->high_wm_percent_set = true;
2405 		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2406 			string = dm_shift_arg(&as), opt_params--;
2407 			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2408 				goto invalid_optional;
2409 			if (low_wm_percent < 0 || low_wm_percent > 100)
2410 				goto invalid_optional;
2411 			wc->low_wm_percent_value = low_wm_percent;
2412 			wc->low_wm_percent_set = true;
2413 		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2414 			string = dm_shift_arg(&as), opt_params--;
2415 			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2416 				goto invalid_optional;
2417 			wc->max_writeback_jobs_set = true;
2418 		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2419 			string = dm_shift_arg(&as), opt_params--;
2420 			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2421 				goto invalid_optional;
2422 			wc->autocommit_blocks_set = true;
2423 		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2424 			unsigned autocommit_msecs;
2425 			string = dm_shift_arg(&as), opt_params--;
2426 			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2427 				goto invalid_optional;
2428 			if (autocommit_msecs > 3600000)
2429 				goto invalid_optional;
2430 			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2431 			wc->autocommit_time_value = autocommit_msecs;
2432 			wc->autocommit_time_set = true;
2433 		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2434 			unsigned max_age_msecs;
2435 			string = dm_shift_arg(&as), opt_params--;
2436 			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2437 				goto invalid_optional;
2438 			if (max_age_msecs > 86400000)
2439 				goto invalid_optional;
2440 			wc->max_age = msecs_to_jiffies(max_age_msecs);
2441 			wc->max_age_set = true;
2442 			wc->max_age_value = max_age_msecs;
2443 		} else if (!strcasecmp(string, "cleaner")) {
2444 			wc->cleaner_set = true;
2445 			wc->cleaner = true;
2446 		} else if (!strcasecmp(string, "fua")) {
2447 			if (WC_MODE_PMEM(wc)) {
2448 				wc->writeback_fua = true;
2449 				wc->writeback_fua_set = true;
2450 			} else goto invalid_optional;
2451 		} else if (!strcasecmp(string, "nofua")) {
2452 			if (WC_MODE_PMEM(wc)) {
2453 				wc->writeback_fua = false;
2454 				wc->writeback_fua_set = true;
2455 			} else goto invalid_optional;
2456 		} else if (!strcasecmp(string, "metadata_only")) {
2457 			wc->metadata_only = true;
2458 		} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
2459 			unsigned pause_msecs;
2460 			if (WC_MODE_PMEM(wc))
2461 				goto invalid_optional;
2462 			string = dm_shift_arg(&as), opt_params--;
2463 			if (sscanf(string, "%u%c", &pause_msecs, &dummy) != 1)
2464 				goto invalid_optional;
2465 			if (pause_msecs > 60000)
2466 				goto invalid_optional;
2467 			wc->pause = msecs_to_jiffies(pause_msecs);
2468 			wc->pause_set = true;
2469 			wc->pause_value = pause_msecs;
2470 		} else {
2471 invalid_optional:
2472 			r = -EINVAL;
2473 			ti->error = "Invalid optional argument";
2474 			goto bad;
2475 		}
2476 	}
2477 
2478 	if (high_wm_percent < low_wm_percent) {
2479 		r = -EINVAL;
2480 		ti->error = "High watermark must be greater than or equal to low watermark";
2481 		goto bad;
2482 	}
2483 
2484 	if (WC_MODE_PMEM(wc)) {
2485 		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
2486 			r = -EOPNOTSUPP;
2487 			ti->error = "Asynchronous persistent memory not supported as pmem cache";
2488 			goto bad;
2489 		}
2490 
2491 		r = persistent_memory_claim(wc);
2492 		if (r) {
2493 			ti->error = "Unable to map persistent memory for cache";
2494 			goto bad;
2495 		}
2496 	} else {
2497 		size_t n_blocks, n_metadata_blocks;
2498 		uint64_t n_bitmap_bits;
2499 
2500 		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2501 
2502 		bio_list_init(&wc->flush_list);
2503 		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2504 		if (IS_ERR(wc->flush_thread)) {
2505 			r = PTR_ERR(wc->flush_thread);
2506 			wc->flush_thread = NULL;
2507 			ti->error = "Couldn't spawn flush thread";
2508 			goto bad;
2509 		}
2510 		wake_up_process(wc->flush_thread);
2511 
2512 		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2513 					  &n_blocks, &n_metadata_blocks);
2514 		if (r) {
2515 			ti->error = "Invalid device size";
2516 			goto bad;
2517 		}
2518 
2519 		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2520 				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2521 		/* this is a limitation of the test_bit functions */
2522 		if (n_bitmap_bits > 1U << 31) {
2523 			r = -EFBIG;
2524 			ti->error = "Invalid device size";
2525 			goto bad;
2526 		}
2527 
2528 		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2529 		if (!wc->memory_map) {
2530 			r = -ENOMEM;
2531 			ti->error = "Unable to allocate memory for metadata";
2532 			goto bad;
2533 		}
2534 
2535 		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2536 		if (IS_ERR(wc->dm_kcopyd)) {
2537 			r = PTR_ERR(wc->dm_kcopyd);
2538 			ti->error = "Unable to allocate dm-kcopyd client";
2539 			wc->dm_kcopyd = NULL;
2540 			goto bad;
2541 		}
2542 
2543 		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2544 		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2545 			BITS_PER_LONG * sizeof(unsigned long);
2546 		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2547 		if (!wc->dirty_bitmap) {
2548 			r = -ENOMEM;
2549 			ti->error = "Unable to allocate dirty bitmap";
2550 			goto bad;
2551 		}
2552 
2553 		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
2554 		if (r) {
2555 			ti->error = "Unable to read first block of metadata";
2556 			goto bad;
2557 		}
2558 	}
2559 
2560 	r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
2561 	if (r) {
2562 		ti->error = "Hardware memory error when reading superblock";
2563 		goto bad;
2564 	}
2565 	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2566 		r = init_memory(wc);
2567 		if (r) {
2568 			ti->error = "Unable to initialize device";
2569 			goto bad;
2570 		}
2571 		r = copy_mc_to_kernel(&s, sb(wc),
2572 				      sizeof(struct wc_memory_superblock));
2573 		if (r) {
2574 			ti->error = "Hardware memory error when reading superblock";
2575 			goto bad;
2576 		}
2577 	}
2578 
2579 	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2580 		ti->error = "Invalid magic in the superblock";
2581 		r = -EINVAL;
2582 		goto bad;
2583 	}
2584 
2585 	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2586 		ti->error = "Invalid version in the superblock";
2587 		r = -EINVAL;
2588 		goto bad;
2589 	}
2590 
2591 	if (le32_to_cpu(s.block_size) != wc->block_size) {
2592 		ti->error = "Block size does not match superblock";
2593 		r = -EINVAL;
2594 		goto bad;
2595 	}
2596 
2597 	wc->n_blocks = le64_to_cpu(s.n_blocks);
2598 
2599 	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2600 	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2601 overflow:
2602 		ti->error = "Overflow in size calculation";
2603 		r = -EINVAL;
2604 		goto bad;
2605 	}
2606 	offset += sizeof(struct wc_memory_superblock);
2607 	if (offset < sizeof(struct wc_memory_superblock))
2608 		goto overflow;
2609 	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2610 	data_size = wc->n_blocks * (size_t)wc->block_size;
2611 	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2612 	    (offset + data_size < offset))
2613 		goto overflow;
2614 	if (offset + data_size > wc->memory_map_size) {
2615 		ti->error = "Memory area is too small";
2616 		r = -EINVAL;
2617 		goto bad;
2618 	}
2619 
2620 	wc->metadata_sectors = offset >> SECTOR_SHIFT;
2621 	wc->block_start = (char *)sb(wc) + offset;
2622 
2623 	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2624 	x += 50;
2625 	do_div(x, 100);
2626 	wc->freelist_high_watermark = x;
2627 	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2628 	x += 50;
2629 	do_div(x, 100);
2630 	wc->freelist_low_watermark = x;
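
	/*
	 * The watermarks are converted from "percent dirty" to absolute
	 * free-block counts (rounded to nearest), which inverts their
	 * order. Illustration: with n_blocks = 261123 and the defaults
	 * of 50/45, writeback starts when free plus in-flight blocks
	 * drop to (261123 * 50 + 50) / 100 = 130562 and continues until
	 * they exceed (261123 * 55 + 50) / 100 = 143618.
	 */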
2631 
2632 	if (wc->cleaner)
2633 		activate_cleaner(wc);
2634 
2635 	r = writecache_alloc_entries(wc);
2636 	if (r) {
2637 		ti->error = "Cannot allocate memory";
2638 		goto bad;
2639 	}
2640 
2641 	ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
2642 	ti->flush_supported = true;
2643 	ti->num_discard_bios = 1;
2644 
2645 	if (WC_MODE_PMEM(wc))
2646 		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2647 
2648 	return 0;
2649 
2650 bad_arguments:
2651 	r = -EINVAL;
2652 	ti->error = "Bad arguments";
2653 bad:
2654 	writecache_dtr(ti);
2655 	return r;
2656 }
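
/*
 * Example construction (a sketch; device paths are placeholders):
 *
 *	dmsetup create wc --table "0 $(blockdev --getsz /dev/vg/origin) \
 *		writecache s /dev/vg/origin /dev/vg/fast 4096 \
 *		4 high_watermark 60 low_watermark 50"
 *
 * The positional arguments mirror the parsing above: mode ('p' for
 * persistent memory, 's' for SSD), origin device, cache device and
 * block size, followed by the counted optional-argument group.
 */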
2657 
2658 static void writecache_status(struct dm_target *ti, status_type_t type,
2659 			      unsigned status_flags, char *result, unsigned maxlen)
2660 {
2661 	struct dm_writecache *wc = ti->private;
2662 	unsigned extra_args;
2663 	unsigned sz = 0;
2664 
2665 	switch (type) {
2666 	case STATUSTYPE_INFO:
2667 		DMEMIT("%ld %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
2668 		       writecache_has_error(wc),
2669 		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2670 		       (unsigned long long)wc->writeback_size,
2671 		       wc->stats.reads,
2672 		       wc->stats.read_hits,
2673 		       wc->stats.writes,
2674 		       wc->stats.write_hits_uncommitted,
2675 		       wc->stats.write_hits_committed,
2676 		       wc->stats.writes_around,
2677 		       wc->stats.writes_allocate,
2678 		       wc->stats.writes_blocked_on_freelist,
2679 		       wc->stats.flushes,
2680 		       wc->stats.discards);
2681 		break;
2682 	case STATUSTYPE_TABLE:
2683 		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2684 				wc->dev->name, wc->ssd_dev->name, wc->block_size);
2685 		extra_args = 0;
2686 		if (wc->start_sector_set)
2687 			extra_args += 2;
2688 		if (wc->high_wm_percent_set)
2689 			extra_args += 2;
2690 		if (wc->low_wm_percent_set)
2691 			extra_args += 2;
2692 		if (wc->max_writeback_jobs_set)
2693 			extra_args += 2;
2694 		if (wc->autocommit_blocks_set)
2695 			extra_args += 2;
2696 		if (wc->autocommit_time_set)
2697 			extra_args += 2;
2698 		if (wc->max_age_set)
2699 			extra_args += 2;
2700 		if (wc->cleaner_set)
2701 			extra_args++;
2702 		if (wc->writeback_fua_set)
2703 			extra_args++;
2704 		if (wc->metadata_only)
2705 			extra_args++;
2706 		if (wc->pause_set)
2707 			extra_args += 2;
2708 
2709 		DMEMIT("%u", extra_args);
2710 		if (wc->start_sector_set)
2711 			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2712 		if (wc->high_wm_percent_set)
2713 			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
2714 		if (wc->low_wm_percent_set)
2715 			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
2716 		if (wc->max_writeback_jobs_set)
2717 			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2718 		if (wc->autocommit_blocks_set)
2719 			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2720 		if (wc->autocommit_time_set)
2721 			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
2722 		if (wc->max_age_set)
2723 			DMEMIT(" max_age %u", wc->max_age_value);
2724 		if (wc->cleaner_set)
2725 			DMEMIT(" cleaner");
2726 		if (wc->writeback_fua_set)
2727 			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2728 		if (wc->metadata_only)
2729 			DMEMIT(" metadata_only");
2730 		if (wc->pause_set)
2731 			DMEMIT(" pause_writeback %u", wc->pause_value);
2732 		break;
2733 	case STATUSTYPE_IMA:
2734 		*result = '\0';
2735 		break;
2736 	}
2737 }
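
/*
 * STATUSTYPE_TABLE echoes back only the options that were explicitly
 * set, so its output can be fed to dmsetup to recreate the target.
 * STATUSTYPE_INFO reports, in order: the error state, the
 * total/free/under-writeback block counts, and the counters
 * accumulated in wc->stats.
 */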
2738 
2739 static struct target_type writecache_target = {
2740 	.name			= "writecache",
2741 	.version		= {1, 6, 0},
2742 	.module			= THIS_MODULE,
2743 	.ctr			= writecache_ctr,
2744 	.dtr			= writecache_dtr,
2745 	.status			= writecache_status,
2746 	.postsuspend		= writecache_suspend,
2747 	.resume			= writecache_resume,
2748 	.message		= writecache_message,
2749 	.map			= writecache_map,
2750 	.end_io			= writecache_end_io,
2751 	.iterate_devices	= writecache_iterate_devices,
2752 	.io_hints		= writecache_io_hints,
2753 };
2754 
2755 static int __init dm_writecache_init(void)
2756 {
2757 	int r;
2758 
2759 	r = dm_register_target(&writecache_target);
2760 	if (r < 0) {
2761 		DMERR("register failed %d", r);
2762 		return r;
2763 	}
2764 
2765 	return 0;
2766 }
2767 
2768 static void __exit dm_writecache_exit(void)
2769 {
2770 	dm_unregister_target(&writecache_target);
2771 }
2772 
2773 module_init(dm_writecache_init);
2774 module_exit(dm_writecache_exit);
2775 
2776 MODULE_DESCRIPTION(DM_NAME " writecache target");
2777 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2778 MODULE_LICENSE("GPL");
2779