/*
 * Copyright (C) 2018 Google Limited.
 *
 * This file is released under the GPL.
 */
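
/*
 * dm-bow ("backup on write") keeps enough information to undo writes made
 * after a checkpoint. The target moves through three states: in TRIM,
 * userspace issues discards to mark the free space that may be used for
 * backups; in CHECKPOINT, a write to a not-yet-saved range first copies
 * the original data into free space and appends a log entry to the first
 * block; in COMMITTED, all I/O passes straight through. The on-disk log
 * records where each range's original data now lives, which is intended
 * to let userspace roll the device back to the checkpoint. (Summary
 * derived from the code below.)
 */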

#include "dm.h"
#include "dm-core.h"

#include <linux/crc32.h>
#include <linux/dm-bufio.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bow"

struct log_entry {
	u64 source;
	u64 dest;
	u32 size;
	u32 checksum;
} __packed;

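/*
 * On-disk header, stored in the first block of the device while a
 * checkpoint is active. The space after the fixed fields holds log
 * entries; when the block fills up, backup_log_sector() copies the
 * current log block into free space, starts a fresh log whose first
 * entry records that copy, and bumps the sequence number.
 */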
struct log_sector {
	u32 magic;
	u16 header_version;
	u16 header_size;
	u32 block_size;
	u32 count;
	u32 sequence;
	sector_t sector0;
	struct log_entry entries[];
} __packed;

/*
 * MAGIC is BOW in ascii
 */
#define MAGIC 0x00574f42
#define HEADER_VERSION 0x0100

/*
 * A sorted set of ranges representing the state of the data on the device.
 * Use an rb_tree for fast lookup of a given sector
 * Consecutive ranges are always of different type - operations on this
 * set must merge matching consecutive ranges.
 *
 * Top range is always of type TOP
 */
struct bow_range {
	struct rb_node		node;
	sector_t		sector;
	enum {
		INVALID,	/* Type not set */
		SECTOR0,	/* First sector - holds log record */
		SECTOR0_CURRENT,/* Live contents of sector0 */
		UNCHANGED,	/* Original contents */
		TRIMMED,	/* Range has been trimmed */
		CHANGED,	/* Range has been changed */
		BACKUP,		/* Range is being used as a backup */
		TOP,		/* Final range - sector is size of device */
	} type;
	struct list_head	trimmed_list; /* list of TRIMMED ranges */
};

static const char * const readable_type[] = {
	"Invalid",
	"Sector0",
	"Sector0_current",
	"Unchanged",
	"Free",
	"Changed",
	"Backup",
	"Top",
};

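/*
 * Lifecycle of the target. state_store() only allows moving forward one
 * state at a time: TRIM -> CHECKPOINT -> COMMITTED.
 */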
enum state {
	TRIM,
	CHECKPOINT,
	COMMITTED,
};

struct bow_context {
	struct dm_dev *dev;
	u32 block_size;
	u32 block_shift;
	struct workqueue_struct *workqueue;
	struct dm_bufio_client *bufio;
	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
	struct rb_root ranges;
	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
	atomic_t state; /* One of the enum state values above */
	u64 trims_total;
	struct log_sector *log_sector;
	struct list_head trimmed_list;
	bool forward_trims;
};

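/* First sector past the end of br, taken from the start of the next range */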
sector_t range_top(struct bow_range *br)
{
	return container_of(rb_next(&br->node), struct bow_range, node)
		->sector;
}

u64 range_size(struct bow_range *br)
{
	return (range_top(br) - br->sector) * SECTOR_SIZE;
}

static sector_t bvec_top(struct bvec_iter *bi_iter)
{
	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
}

/*
 * Find the first range that overlaps with bi_iter
 * bi_iter is set to the size of the overlapping sub-range
 */
static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
						      struct bvec_iter *bi_iter)
{
	struct rb_node *node = ranges->rb_node;
	struct bow_range *br;

	while (node) {
		br = container_of(node, struct bow_range, node);

		if (br->sector <= bi_iter->bi_sector
		    && bi_iter->bi_sector < range_top(br))
			break;

		if (bi_iter->bi_sector < br->sector)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	WARN_ON(!node);
	if (!node)
		return NULL;

	if (range_top(br) - bi_iter->bi_sector
	    < bi_iter->bi_size >> SECTOR_SHIFT)
		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
			<< SECTOR_SHIFT;

	return br;
}

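/*
 * Insert new_br immediately before existing in sector order, i.e. at the
 * rightmost position in existing's left subtree (its in-order predecessor
 * slot).
 */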
void add_before(struct rb_root *ranges, struct bow_range *new_br,
		struct bow_range *existing)
{
	struct rb_node *parent = &(existing->node);
	struct rb_node **link = &(parent->rb_left);

	while (*link) {
		parent = *link;
		link = &((*link)->rb_right);
	}

	rb_link_node(&new_br->node, parent, link);
	rb_insert_color(&new_br->node, ranges);
}

/*
 * Given a range br returned by find_first_overlapping_range, split br into a
 * leading range, a range matching the bi_iter and a trailing range.
 * Leading and trailing may end up size 0 and will then be deleted. The
 * new range matching the bi_iter is then returned and should have its type
 * and type specific fields populated.
 * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
 */
static int split_range(struct bow_context *bc, struct bow_range **br,
		       struct bvec_iter *bi_iter)
{
	struct bow_range *new_br;

	if (bi_iter->bi_sector < (*br)->sector) {
		WARN_ON(true);
		return BLK_STS_IOERR;
	}

	if (bi_iter->bi_sector > (*br)->sector) {
		struct bow_range *leading_br =
			kzalloc(sizeof(*leading_br), GFP_KERNEL);

		if (!leading_br)
			return BLK_STS_RESOURCE;

		*leading_br = **br;
		if (leading_br->type == TRIMMED)
			list_add(&leading_br->trimmed_list, &bc->trimmed_list);

		add_before(&bc->ranges, leading_br, *br);
		(*br)->sector = bi_iter->bi_sector;
	}

	if (bvec_top(bi_iter) >= range_top(*br)) {
		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
					* SECTOR_SIZE;
		return BLK_STS_OK;
	}

	/* new_br will be the beginning, existing br will be the tail */
	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
	if (!new_br)
		return BLK_STS_RESOURCE;

	new_br->sector = (*br)->sector;
	(*br)->sector = bvec_top(bi_iter);
	add_before(&bc->ranges, new_br, *br);
	*br = new_br;

	return BLK_STS_OK;
}

/*
 * Sets type of a range. May merge range into surrounding ranges
 * Since br may be invalidated, always sets br to NULL to prevent
 * usage after this is called
 */
static void set_type(struct bow_context *bc, struct bow_range **br, int type)
{
	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
					      struct bow_range, node);
	struct bow_range *next = container_of(rb_next(&(*br)->node),
					      struct bow_range, node);

	if ((*br)->type == TRIMMED) {
		bc->trims_total -= range_size(*br);
		list_del(&(*br)->trimmed_list);
	}

	if (type == TRIMMED) {
		bc->trims_total += range_size(*br);
		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
	}

	(*br)->type = type;

	if (next->type == type) {
		if (type == TRIMMED)
			list_del(&next->trimmed_list);
		rb_erase(&next->node, &bc->ranges);
		kfree(next);
	}

	if (prev->type == type) {
		if (type == TRIMMED)
			list_del(&(*br)->trimmed_list);
		rb_erase(&(*br)->node, &bc->ranges);
		kfree(*br);
	}

	*br = NULL;
}

static struct bow_range *find_free_range(struct bow_context *bc)
{
	if (list_empty(&bc->trimmed_list)) {
		DMERR("Unable to find free space to back up to");
		return NULL;
	}

	return list_first_entry(&bc->trimmed_list, struct bow_range,
				trimmed_list);
}

static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
{
	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
		!= 0);
	return sector >> (bc->block_shift - SECTOR_SHIFT);
}

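/*
 * Copy a range block by block through dm-bufio. If checksum is non-NULL,
 * accumulate a crc32 over the data, seeded with the source page number.
 */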
static int copy_data(struct bow_context const *bc,
		     struct bow_range *source, struct bow_range *dest,
		     u32 *checksum)
{
	int i;

	if (range_size(source) != range_size(dest)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (checksum)
		*checksum = sector_to_page(bc, source->sector);

	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
		struct dm_buffer *read_buffer, *write_buffer;
		u8 *read, *write;
		sector_t page = sector_to_page(bc, source->sector) + i;

		read = dm_bufio_read(bc->bufio, page, &read_buffer);
		if (IS_ERR(read)) {
			DMERR("Cannot read page %llu",
			      (unsigned long long)page);
			return PTR_ERR(read);
		}

		if (checksum)
			*checksum = crc32(*checksum, read, bc->block_size);

		write = dm_bufio_new(bc->bufio,
				     sector_to_page(bc, dest->sector) + i,
				     &write_buffer);
		if (IS_ERR(write)) {
			DMERR("Cannot write sector");
			dm_bufio_release(read_buffer);
			return PTR_ERR(write);
		}

		memcpy(write, read, bc->block_size);

		dm_bufio_mark_buffer_dirty(write_buffer);
		dm_bufio_release(write_buffer);
		dm_bufio_release(read_buffer);
	}

	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

/****** logging functions ******/

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum);

static int backup_log_sector(struct bow_context *bc)
{
	struct bow_range *first_br, *free_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);

	if (first_br->type != SECTOR0) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	free_br = find_free_range(bc);
	/* No space left - return this error to userspace */
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	if (bi_iter.bi_size != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	bc->log_sector->count = 0;
	bc->log_sector->sequence++;
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum)
{
	struct dm_buffer *sector_buffer;
	u8 *sector;

	if (sizeof(struct log_sector)
	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
		> bc->block_size) {
		int ret = backup_log_sector(bc);

		if (ret)
			return ret;
	}

	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
	if (IS_ERR(sector)) {
		DMERR("Cannot write boot sector");
		dm_bufio_release(sector_buffer);
		return BLK_STS_NOSPC;
	}

	bc->log_sector->entries[bc->log_sector->count].source = source;
	bc->log_sector->entries[bc->log_sector->count].dest = dest;
	bc->log_sector->entries[bc->log_sector->count].size = size;
	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
	bc->log_sector->count++;

	memcpy(sector, bc->log_sector, bc->block_size);
	dm_bufio_mark_buffer_dirty(sector_buffer);
	dm_bufio_release(sector_buffer);
	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

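/*
 * Called on the TRIM -> CHECKPOINT transition. Carves out the first block
 * as the log sector, redirects its live contents to a free block
 * (SECTOR0_CURRENT), backs up the original first block, and writes the
 * initial log so the device can be restored from this point on.
 */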
static int prepare_log(struct bow_context *bc)
{
	struct bow_range *free_br, *first_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	/* Carve out first sector as log sector */
	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
	if (first_br->type != UNCHANGED) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) < bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
	bi_iter.bi_sector = 0;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &first_br, &bi_iter);
	if (ret)
		return ret;
	first_br->type = SECTOR0;
	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Find free sector for active sector0 reads/writes */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	free_br->type = SECTOR0_CURRENT;

	/* Copy data */
	ret = copy_data(bc, first_br, free_br, NULL);
	if (ret)
		return ret;

	bc->log_sector->sector0 = free_br->sector;

	/* Find free sector to back up original sector zero */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	/* Back up */
	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	/*
	 * Set up our replacement boot sector - it will get written when we
	 * add the first log entry, which we do immediately
	 */
	bc->log_sector->magic = MAGIC;
	bc->log_sector->header_version = HEADER_VERSION;
	bc->log_sector->header_size = sizeof(*bc->log_sector);
	bc->log_sector->block_size = bc->block_size;
	bc->log_sector->count = 0;
	bc->log_sector->sequence = 0;

	/* Add log entry */
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static struct bow_range *find_sector0_current(struct bow_context *bc)
{
	struct bvec_iter bi_iter;

	bi_iter.bi_sector = bc->log_sector->sector0;
	bi_iter.bi_size = bc->block_size;
	return find_first_overlapping_range(&bc->ranges, &bi_iter);
}

/****** sysfs interface functions ******/

static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	enum state state, original_state;
	int ret;

	state = buf[0] - '0';
	if (state < TRIM || state > COMMITTED) {
		DMERR("State value %d out of range", state);
		return -EINVAL;
	}

	mutex_lock(&bc->ranges_lock);
	original_state = atomic_read(&bc->state);
	if (state != original_state + 1) {
		DMERR("Invalid state change from %d to %d",
		      original_state, state);
		ret = -EINVAL;
		goto bad;
	}

	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
	       : state == COMMITTED ? "Committed" : "Unknown");

	if (state == CHECKPOINT) {
		ret = prepare_log(bc);
		if (ret) {
			DMERR("Failed to switch to checkpoint state");
			goto bad;
		}
	} else if (state == COMMITTED) {
		struct bow_range *br = find_sector0_current(bc);
		struct bow_range *sector0_br =
			container_of(rb_first(&bc->ranges), struct bow_range,
				     node);

		ret = copy_data(bc, br, sector0_br, NULL);
		if (ret) {
			DMERR("Failed to switch to committed state");
			goto bad;
		}
	}
	atomic_inc(&bc->state);
	ret = count;

bad:
	mutex_unlock(&bc->ranges_lock);
	return ret;
}

static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	u64 trims_total;

	mutex_lock(&bc->ranges_lock);
	trims_total = bc->trims_total;
	mutex_unlock(&bc->ranges_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
}

static struct kobj_attribute attr_state = __ATTR_RW(state);
static struct kobj_attribute attr_free = __ATTR_RO(free);

static struct attribute *bow_attrs[] = {
	&attr_state.attr,
	&attr_free.attr,
	NULL
};

static struct kobj_type bow_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = bow_attrs,
	.release = dm_kobject_release
};

/****** constructor/destructor ******/

static void dm_bow_dtr(struct dm_target *ti)
{
	struct bow_context *bc = (struct bow_context *) ti->private;
	struct kobject *kobj;

	mutex_lock(&bc->ranges_lock);
	while (rb_first(&bc->ranges)) {
		struct bow_range *br = container_of(rb_first(&bc->ranges),
						    struct bow_range, node);

		rb_erase(&br->node, &bc->ranges);
		kfree(br);
	}
	mutex_unlock(&bc->ranges_lock);

	if (bc->workqueue)
		destroy_workqueue(bc->workqueue);
	if (bc->bufio)
		dm_bufio_client_destroy(bc->bufio);

	kobj = &bc->kobj_holder.kobj;
	if (kobj->state_initialized) {
		kobject_put(kobj);
		wait_for_completion(dm_get_completion_from_kobject(kobj));
	}

	kfree(bc->log_sector);
	kfree(bc);
}

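/*
 * If the underlying device does not support discard, advertise a discard
 * capability anyway (dm-bow needs trims to learn which space is free) and
 * remember not to forward them; otherwise pass trims straight through.
 */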
static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct bow_context *bc = ti->private;
	const unsigned int block_size = bc->block_size;

	limits->logical_block_size =
		max_t(unsigned short, limits->logical_block_size, block_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, block_size);
	limits->io_min = max_t(unsigned int, limits->io_min, block_size);

	if (limits->max_discard_sectors == 0) {
		limits->discard_granularity = 1 << 12;
		limits->max_hw_discard_sectors = 1 << 15;
		limits->max_discard_sectors = 1 << 15;
		bc->forward_trims = false;
	} else {
		limits->discard_granularity = 1 << 12;
		bc->forward_trims = true;
	}
}

static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc,
		char **argv)
{
	struct bow_context *bc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};
	unsigned int opt_params;
	const char *opt_string;
	int err;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (err)
		return err;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (sscanf(opt_string, "block_size:%u%c",
					&bc->block_size, &dummy) == 1) {
			if (bc->block_size < SECTOR_SIZE ||
			    bc->block_size > 4096 ||
			    !is_power_of_2(bc->block_size)) {
				ti->error = "Invalid block_size";
				return -EINVAL;
			}
		} else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc;
	struct bow_range *br;
	int ret;
	struct mapped_device *md = dm_table_get_md(ti->table);

	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
	if (!bc) {
		ti->error = "Cannot allocate bow context";
		return -ENOMEM;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->private = bc;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &bc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	bc->block_size = bc->dev->bdev->bd_queue->limits.logical_block_size;
	if (argc > 1) {
		ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
		if (ret)
			goto bad;
	}

	bc->block_shift = ilog2(bc->block_size);
	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
	if (!bc->log_sector) {
		ti->error = "Cannot allocate log sector";
		ret = -ENOMEM;
		goto bad;
	}

	init_completion(&bc->kobj_holder.completion);
	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
				   &disk_to_dev(dm_disk(md))->kobj, "%s",
				   "bow");
	if (ret) {
		ti->error = "Cannot create sysfs node";
		goto bad;
	}

	mutex_init(&bc->ranges_lock);
	bc->ranges = RB_ROOT;
	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
					   NULL, NULL);
	if (IS_ERR(bc->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		ret = PTR_ERR(bc->bufio);
		bc->bufio = NULL;
		goto bad;
	}

	bc->workqueue = alloc_workqueue("dm-bow",
					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
					| WQ_UNBOUND, num_online_cpus());
	if (!bc->workqueue) {
		ti->error = "Cannot allocate workqueue";
		ret = -ENOMEM;
		goto bad;
	}

	INIT_LIST_HEAD(&bc->trimmed_list);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = ti->len;
	br->type = TOP;
	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
	rb_insert_color(&br->node, &bc->ranges);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = 0;
	br->type = UNCHANGED;
	rb_link_node(&br->node, bc->ranges.rb_node,
		     &bc->ranges.rb_node->rb_left);
	rb_insert_color(&br->node, &bc->ranges);

	ti->discards_supported = true;
	ti->may_passthrough_inline_crypto = true;

	return 0;

bad:
	dm_bow_dtr(ti);
	return ret;
}

/****** Handle writes ******/

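/*
 * Back up an UNCHANGED (or SECTOR0_CURRENT) range before it is written:
 * grab free space, copy the original data there, log the copy, then mark
 * the source CHANGED and the destination BACKUP. The ordering of these
 * steps matters - see the comments below.
 */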
static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
				   struct bvec_iter *bi_iter,
				   bool record_checksum)
{
	struct bow_range *backup_br;
	struct bvec_iter backup_bi;
	sector_t log_source, log_dest;
	unsigned int log_size;
	u32 checksum = 0;
	int ret;
	int original_type;
	sector_t sector0;

	/* Find a free range */
	backup_br = find_free_range(bc);
	if (!backup_br)
		return BLK_STS_NOSPC;

	/* Carve out a backup range. This may be smaller than the br given */
	backup_bi.bi_sector = backup_br->sector;
	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
	ret = split_range(bc, &backup_br, &backup_bi);
	if (ret)
		return ret;

	/*
	 * Carve out a changed range. This will not be smaller than the backup
	 * br since the backup br is smaller than the source range and iterator
	 */
	bi_iter->bi_size = backup_bi.bi_size;
	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	if (range_size(br) != range_size(backup_br)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Copy data over */
	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
	if (ret)
		return ret;

	/* Add an entry to the log */
	log_source = br->sector;
	log_dest = backup_br->sector;
	log_size = range_size(br);

	/*
	 * Set the types. Note that since set_type also amalgamates ranges
	 * we have to set both sectors to their final type before calling
	 * set_type on either
	 */
	original_type = br->type;
	sector0 = backup_br->sector;
	bc->trims_total -= range_size(backup_br);
	if (backup_br->type == TRIMMED)
		list_del(&backup_br->trimmed_list);
	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
						      : BACKUP;
	br->type = CHANGED;
	set_type(bc, &backup_br, backup_br->type);

	/*
	 * Add the log entry after marking the backup sector, since adding a log
	 * can cause another backup
	 */
	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
	if (ret) {
		br->type = original_type;
		return ret;
	}

	/* Now it is safe to mark this backup successful */
	if (original_type == SECTOR0_CURRENT)
		bc->log_sector->sector0 = sector0;

	set_type(bc, &br, br->type);
	return ret;
}

static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
			      struct bvec_iter *bi_iter)
{
	int ret;

	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	set_type(bc, &br, CHANGED);
	return BLK_STS_OK;
}

static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
				 struct bvec_iter *bi_iter)
{
	/* Nothing to do ... */
	return BLK_STS_OK;
}

static int prepare_one_range(struct bow_context *bc,
			     struct bvec_iter *bi_iter)
{
	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
							    bi_iter);
	switch (br->type) {
	case CHANGED:
		return prepare_changed_range(bc, br, bi_iter);

	case TRIMMED:
		return prepare_free_range(bc, br, bi_iter);

	case UNCHANGED:
	case BACKUP:
		return prepare_unchanged_range(bc, br, bi_iter, true);

	/*
	 * We cannot track the checksum for the active sector0, since it
	 * may change at any point.
	 */
	case SECTOR0_CURRENT:
		return prepare_unchanged_range(bc, br, bi_iter, false);

	case SECTOR0:	/* Handled in the dm_bow_map */
	case TOP:	/* Illegal - top is off the end of the device */
	default:
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

struct write_work {
	struct work_struct work;
	struct bow_context *bc;
	struct bio *bio;
};

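/*
 * Worker: prepare every range the bio touches (backing up data as needed)
 * under ranges_lock, then either resubmit the bio to the underlying
 * device or fail it.
 */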
static void bow_write(struct work_struct *work)
{
	struct write_work *ww = container_of(work, struct write_work, work);
	struct bow_context *bc = ww->bc;
	struct bio *bio = ww->bio;
	struct bvec_iter bi_iter = bio->bi_iter;
	int ret = BLK_STS_OK;

	kfree(ww);

	mutex_lock(&bc->ranges_lock);
	do {
		ret = prepare_one_range(bc, &bi_iter);
		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;
	} while (!ret && bi_iter.bi_size);

	mutex_unlock(&bc->ranges_lock);

	if (!ret) {
		bio_set_dev(bio, bc->dev->bdev);
		submit_bio(bio);
	} else {
		DMERR("Write failure with error %d", -ret);
		bio->bi_status = ret;
		bio_endio(bio);
	}
}

static int queue_write(struct bow_context *bc, struct bio *bio)
{
	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
					| __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!ww) {
		DMERR("Failed to allocate write_work");
		return -ENOMEM;
	}

	INIT_WORK(&ww->work, bow_write);
	ww->bc = bc;
	ww->bio = bio;
	queue_work(bc->workqueue, &ww->work);
	return DM_MAPIO_SUBMITTED;
}

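/*
 * I/O aimed at sector 0 is redirected to the SECTOR0_CURRENT block, since
 * the real first block now holds the log. Larger bios are split so that
 * only the first block is redirected.
 */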
static int handle_sector0(struct bow_context *bc, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;

	if (bio->bi_iter.bi_size > bc->block_size) {
		struct bio *split = bio_split(bio,
					      bc->block_size >> SECTOR_SHIFT,
					      GFP_NOIO,
					      &fs_bio_set);
		if (!split) {
			DMERR("Failed to split bio");
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		bio_chain(split, bio);
		split->bi_iter.bi_sector = bc->log_sector->sector0;
		bio_set_dev(split, bc->dev->bdev);
		submit_bio(split);

		if (bio_data_dir(bio) == WRITE)
			ret = queue_write(bc, bio);
	} else {
		bio->bi_iter.bi_sector = bc->log_sector->sector0;
	}

	return ret;
}

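/*
 * In the TRIM state, discards are swallowed after recording the range as
 * free backup space, while writes un-mark any overlapping trimmed ranges
 * and are then passed through.
 */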
static int add_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("add_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, TRIMMED);
			break;

		case TRIMMED:
			/* Nothing to do */
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;

	} while (bi_iter.bi_size);

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;
}

static int remove_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("remove_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			/* Nothing to do */
			break;

		case TRIMMED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, UNCHANGED);
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;

	} while (bi_iter.bi_size);

	return DM_MAPIO_REMAPPED;
}

int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
{
	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	} else {
		bio_set_dev(bio, bc->dev->bdev);
		return DM_MAPIO_REMAPPED;
	}
}

/****** dm interface ******/

static int dm_bow_map(struct dm_target *ti, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;
	struct bow_context *bc = ti->private;

	if (likely(bc->state.counter == COMMITTED))
		return remap_unless_illegal_trim(bc, bio);

	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
		return remap_unless_illegal_trim(bc, bio);

	if (atomic_read(&bc->state) != COMMITTED) {
		enum state state;

		mutex_lock(&bc->ranges_lock);
		state = atomic_read(&bc->state);
		if (state == TRIM) {
			if (bio_op(bio) == REQ_OP_DISCARD)
				ret = add_trim(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = remove_trim(bc, bio);
			else
				/* pass-through */;
		} else if (state == CHECKPOINT) {
			if (bio->bi_iter.bi_sector == 0)
				ret = handle_sector0(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = queue_write(bc, bio);
			else
				/* pass-through */;
		} else {
			/* pass-through */
		}
		mutex_unlock(&bc->ranges_lock);
	}

	if (ret == DM_MAPIO_REMAPPED)
		return remap_unless_illegal_trim(bc, bio);

	return ret;
}

static void dm_bow_tablestatus(struct dm_target *ti, char *result,
			       unsigned int maxlen)
{
	char *end = result + maxlen;
	struct bow_context *bc = ti->private;
	struct rb_node *i;
	int trimmed_list_length = 0;
	int trimmed_range_count = 0;
	struct bow_range *br;

	if (maxlen == 0)
		return;
	result[0] = 0;

	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
		if (br->type == TRIMMED) {
			++trimmed_list_length;
		} else {
			scnprintf(result, end - result,
				  "ERROR: non-trimmed entry in trimmed_list");
			return;
		}

	if (!rb_first(&bc->ranges)) {
		scnprintf(result, end - result, "ERROR: Empty ranges");
		return;
	}

	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
	    ->sector) {
		scnprintf(result, end - result,
			  "ERROR: First range does not start at sector 0");
		return;
	}

	mutex_lock(&bc->ranges_lock);
	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
		struct bow_range *br = container_of(i, struct bow_range, node);

		result += scnprintf(result, end - result, "%s: %llu",
				    readable_type[br->type],
				    (unsigned long long)br->sector);
		if (result >= end)
			goto unlock;

		result += scnprintf(result, end - result, "\n");
		if (result >= end)
			goto unlock;

		if (br->type == TRIMMED)
			++trimmed_range_count;

		if (br->type == TOP) {
			if (br->sector != ti->len) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is incorrect");
			}

			if (&br->node != rb_last(&bc->ranges)) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is not last");
			}

			break;
		}

		if (!rb_next(i)) {
			scnprintf(result, end - result,
				  "\nERROR: Last range not of type TOP");
			goto unlock;
		}

		if (br->sector > range_top(br)) {
			scnprintf(result, end - result,
				  "\nERROR: sectors out of order");
			goto unlock;
		}
	}

	if (trimmed_range_count != trimmed_list_length)
		scnprintf(result, end - result,
			  "\nERROR: not all trimmed ranges in trimmed list");

unlock:
	mutex_unlock(&bc->ranges_lock);
}

static void dm_bow_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result,
			  unsigned int maxlen)
{
	switch (type) {
	case STATUSTYPE_INFO:
		if (maxlen)
			result[0] = 0;
		break;

	case STATUSTYPE_TABLE:
		dm_bow_tablestatus(ti, result, maxlen);
		break;
	}
}

int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct bow_context *bc = ti->private;
	struct dm_dev *dev = bc->dev;

	*bdev = dev->bdev;
	/* Only pass ioctls through if the device sizes match exactly. */
	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int dm_bow_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct bow_context *bc = ti->private;

	return fn(ti, bc->dev, 0, ti->len, data);
}

static struct target_type bow_target = {
	.name   = "bow",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr    = dm_bow_ctr,
	.dtr    = dm_bow_dtr,
	.map    = dm_bow_map,
	.status = dm_bow_status,
	.prepare_ioctl  = dm_bow_prepare_ioctl,
	.iterate_devices = dm_bow_iterate_devices,
	.io_hints = dm_bow_io_hints,
};

int __init dm_bow_init(void)
{
	int r = dm_register_target(&bow_target);

	if (r < 0)
		DMERR("registering bow failed %d", r);
	return r;
}

void dm_bow_exit(void)
{
	dm_unregister_target(&bow_target);
}

MODULE_LICENSE("GPL");

module_init(dm_bow_init);
module_exit(dm_bow_exit);