/*
 * Copyright (C) 2018 Google Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-core.h"

#include <linux/crc32.h>
#include <linux/dm-bufio.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bow"

struct log_entry {
	u64 source;
	u64 dest;
	u32 size;
	u32 checksum;
} __packed;

struct log_sector {
	u32 magic;
	u16 header_version;
	u16 header_size;
	u32 block_size;
	u32 count;
	u32 sequence;
	sector_t sector0;
	struct log_entry entries[];
} __packed;
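
/*
 * The log lives in the first block of the device: one struct log_sector
 * header followed by as many struct log_entry records as fit in the rest
 * of the block. As a rough illustration (assuming a 4096-byte block and a
 * 64-bit sector_t, giving a 28-byte packed header and 24-byte entries), a
 * single log block holds (4096 - 28) / 24 = 169 entries before it must be
 * backed up and restarted; see backup_log_sector() below.
 */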

/*
 * MAGIC is "BOW" in ASCII.
 */
#define MAGIC 0x00574f42
#define HEADER_VERSION 0x0100

/*
 * A sorted set of ranges representing the state of the data on the device.
 * Use an rb_tree for fast lookup of a given sector.
 * Consecutive ranges are always of different type - operations on this
 * set must merge matching consecutive ranges.
 *
 * Top range is always of type TOP.
 */
struct bow_range {
	struct rb_node		node;
	sector_t		sector;
	enum {
		INVALID,	/* Type not set */
		SECTOR0,	/* First sector - holds log record */
		SECTOR0_CURRENT,/* Live contents of sector0 */
		UNCHANGED,	/* Original contents */
		TRIMMED,	/* Range has been trimmed */
		CHANGED,	/* Range has been changed */
		BACKUP,		/* Range is being used as a backup */
		TOP,		/* Final range - sector is size of device */
	} type;
	struct list_head	trimmed_list; /* list of TRIMMED ranges */
};
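
/*
 * Example of the invariant, as a sketch (sector numbers assume a 4096-byte
 * block, i.e. 8 sectors): a device whose first data block was rewritten
 * during a checkpoint might be represented by the ranges
 *
 *   { SECTOR0 @ 0, CHANGED @ 8, UNCHANGED @ 16, ..., TOP @ <device size> }
 *
 * where each range ends at the start sector of the next one and no two
 * neighbouring ranges share a type.
 */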

static const char * const readable_type[] = {
	"Invalid",
	"Sector0",
	"Sector0_current",
	"Unchanged",
	"Free",		/* TRIMMED ranges are reported as free space */
	"Changed",
	"Backup",
	"Top",
};

enum state {
	TRIM,
	CHECKPOINT,
	COMMITTED,
};
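
/*
 * The target starts in TRIM state and only ever moves forward, one step at
 * a time, to CHECKPOINT and then COMMITTED (enforced in state_store below).
 * In TRIM state, discards are recorded as free space the backup may use; in
 * CHECKPOINT state, writes to unchanged data are backed up before they land;
 * in COMMITTED state the target becomes a simple pass-through.
 */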

struct bow_context {
	struct dm_dev *dev;
	u32 block_size;
	u32 block_shift;
	struct workqueue_struct *workqueue;
	struct dm_bufio_client *bufio;
	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
	struct rb_root ranges;
	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
	atomic_t state; /* One of the enum state values above */
	u64 trims_total;
	struct log_sector *log_sector;
	struct list_head trimmed_list;
	bool forward_trims;
};

sector_t range_top(struct bow_range *br)
{
	return container_of(rb_next(&br->node), struct bow_range, node)
		->sector;
}

u64 range_size(struct bow_range *br)
{
	return (range_top(br) - br->sector) * SECTOR_SIZE;
}

static sector_t bvec_top(struct bvec_iter *bi_iter)
{
	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
}

/*
 * Find the first range that overlaps with bi_iter.
 * bi_iter is truncated to lie within the overlapping sub-range.
 */
static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
						      struct bvec_iter *bi_iter)
{
	struct rb_node *node = ranges->rb_node;
	struct bow_range *br;

	while (node) {
		br = container_of(node, struct bow_range, node);

		if (br->sector <= bi_iter->bi_sector
		    && bi_iter->bi_sector < range_top(br))
			break;

		if (bi_iter->bi_sector < br->sector)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	WARN_ON(!node);
	if (!node)
		return NULL;

	if (range_top(br) - bi_iter->bi_sector
	    < bi_iter->bi_size >> SECTOR_SHIFT)
		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
			<< SECTOR_SHIFT;

	return br;
}

void add_before(struct rb_root *ranges, struct bow_range *new_br,
		struct bow_range *existing)
{
	struct rb_node *parent = &(existing->node);
	struct rb_node **link = &(parent->rb_left);

	while (*link) {
		parent = *link;
		link = &((*link)->rb_right);
	}

	rb_link_node(&new_br->node, parent, link);
	rb_insert_color(&new_br->node, ranges);
}

/*
 * Given a range br returned by find_first_overlapping_range, split br into a
 * leading range, a range matching the bi_iter and a trailing range.
 * Leading and trailing may end up size 0 and will then be deleted. The
 * new range matching the bi_iter is then returned and should have its type
 * and type specific fields populated.
 * If bi_iter runs off the end of the range, bi_iter is truncated accordingly.
 */
static int split_range(struct bow_context *bc, struct bow_range **br,
		       struct bvec_iter *bi_iter)
{
	struct bow_range *new_br;

	if (bi_iter->bi_sector < (*br)->sector) {
		WARN_ON(true);
		return BLK_STS_IOERR;
	}

	if (bi_iter->bi_sector > (*br)->sector) {
		struct bow_range *leading_br =
			kzalloc(sizeof(*leading_br), GFP_KERNEL);

		if (!leading_br)
			return BLK_STS_RESOURCE;

		*leading_br = **br;
		if (leading_br->type == TRIMMED)
			list_add(&leading_br->trimmed_list, &bc->trimmed_list);

		add_before(&bc->ranges, leading_br, *br);
		(*br)->sector = bi_iter->bi_sector;
	}

	if (bvec_top(bi_iter) >= range_top(*br)) {
		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
					* SECTOR_SIZE;
		return BLK_STS_OK;
	}

	/* new_br will be the beginning, existing br will be the tail */
	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
	if (!new_br)
		return BLK_STS_RESOURCE;

	new_br->sector = (*br)->sector;
	(*br)->sector = bvec_top(bi_iter);
	add_before(&bc->ranges, new_br, *br);
	*br = new_br;

	return BLK_STS_OK;
}
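
/*
 * Worked example (a sketch, with sector numbers made up for illustration):
 * splitting an UNCHANGED range covering sectors [0, 100) against a bi_iter
 * of bi_sector = 10, bi_size = 20 sectors leaves three ranges in the tree -
 * [0, 10) keeps the old type, [10, 30) is returned through *br ready to be
 * retyped by the caller, and [30, 100) keeps the old type as the tail.
 */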

/*
 * Sets type of a range. May merge range into surrounding ranges.
 * Since br may be invalidated, always sets br to NULL to prevent
 * usage after this is called.
 */
static void set_type(struct bow_context *bc, struct bow_range **br, int type)
{
	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
						      struct bow_range, node);
	struct bow_range *next = container_of(rb_next(&(*br)->node),
						      struct bow_range, node);

	if ((*br)->type == TRIMMED) {
		bc->trims_total -= range_size(*br);
		list_del(&(*br)->trimmed_list);
	}

	if (type == TRIMMED) {
		bc->trims_total += range_size(*br);
		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
	}

	(*br)->type = type;

	if (next->type == type) {
		if (type == TRIMMED)
			list_del(&next->trimmed_list);
		rb_erase(&next->node, &bc->ranges);
		kfree(next);
	}

	if (prev->type == type) {
		if (type == TRIMMED)
			list_del(&(*br)->trimmed_list);
		rb_erase(&(*br)->node, &bc->ranges);
		kfree(*br);
	}

	*br = NULL;
}
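
/*
 * Note on the merge above, since it is easy to misread: when the next range
 * has the same type, it is the *next* node that is erased (the current range
 * then extends to the following node's start sector, because range_top()
 * reads rb_next); but when the previous range has the same type, it is the
 * *current* node that is erased and the previous range absorbs it.
 */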

static struct bow_range *find_free_range(struct bow_context *bc)
{
	if (list_empty(&bc->trimmed_list)) {
		DMERR("Unable to find free space to back up to");
		return NULL;
	}

	return list_first_entry(&bc->trimmed_list, struct bow_range,
				trimmed_list);
}

static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
{
	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
		!= 0);
	return sector >> (bc->block_shift - SECTOR_SHIFT);
}

static int copy_data(struct bow_context const *bc,
		     struct bow_range *source, struct bow_range *dest,
		     u32 *checksum)
{
	int i;

	if (range_size(source) != range_size(dest)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (checksum)
		/* Seed the checksum with the source page index */
		*checksum = sector_to_page(bc, source->sector);

	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
		struct dm_buffer *read_buffer, *write_buffer;
		u8 *read, *write;
		sector_t page = sector_to_page(bc, source->sector) + i;

		read = dm_bufio_read(bc->bufio, page, &read_buffer);
		if (IS_ERR(read)) {
			DMERR("Cannot read page %llu",
			      (unsigned long long)page);
			return PTR_ERR(read);
		}

		if (checksum)
			*checksum = crc32(*checksum, read, bc->block_size);

		write = dm_bufio_new(bc->bufio,
				     sector_to_page(bc, dest->sector) + i,
				     &write_buffer);
		if (IS_ERR(write)) {
			DMERR("Cannot write sector");
			dm_bufio_release(read_buffer);
			return PTR_ERR(write);
		}

		memcpy(write, read, bc->block_size);

		dm_bufio_mark_buffer_dirty(write_buffer);
		dm_bufio_release(write_buffer);
		dm_bufio_release(read_buffer);
	}

	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

/****** logging functions ******/

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum);

static int backup_log_sector(struct bow_context *bc)
{
	struct bow_range *first_br, *free_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);

	if (first_br->type != SECTOR0) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	free_br = find_free_range(bc);
	/* No space left - return this error to userspace */
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	if (bi_iter.bi_size != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	bc->log_sector->count = 0;
	bc->log_sector->sequence++;
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum)
{
	struct dm_buffer *sector_buffer;
	u8 *sector;

	if (sizeof(struct log_sector)
	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
		> bc->block_size) {
		int ret = backup_log_sector(bc);

		if (ret)
			return ret;
	}

	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
	if (IS_ERR(sector)) {
		DMERR("Cannot write boot sector");
		dm_bufio_release(sector_buffer);
		return BLK_STS_NOSPC;
	}

	bc->log_sector->entries[bc->log_sector->count].source = source;
	bc->log_sector->entries[bc->log_sector->count].dest = dest;
	bc->log_sector->entries[bc->log_sector->count].size = size;
	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
	bc->log_sector->count++;

	memcpy(sector, bc->log_sector, bc->block_size);
	dm_bufio_mark_buffer_dirty(sector_buffer);
	dm_bufio_release(sector_buffer);
	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}
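
/*
 * How the log chains, as a reading aid: when the next entry would no longer
 * fit in one block, backup_log_sector() copies the current log block out to
 * free space, resets count, bumps sequence, and records that copy as the
 * first entry of the fresh log. A restore tool should therefore be able to
 * start from block 0 and follow these entries back through every earlier
 * generation of the log.
 */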

static int prepare_log(struct bow_context *bc)
{
	struct bow_range *free_br, *first_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	/* Carve out first sector as log sector */
	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
	if (first_br->type != UNCHANGED) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) < bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
	bi_iter.bi_sector = 0;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &first_br, &bi_iter);
	if (ret)
		return ret;
	first_br->type = SECTOR0;
	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Find free sector for active sector0 reads/writes */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	/* Copy data */
	ret = copy_data(bc, first_br, free_br, NULL);
	if (ret)
		return ret;

	bc->log_sector->sector0 = free_br->sector;

	set_type(bc, &free_br, SECTOR0_CURRENT);

	/* Find free sector to back up original sector zero */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	/* Back up */
	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	/*
	 * Set up our replacement boot sector - it will get written when we
	 * add the first log entry, which we do immediately
	 */
	bc->log_sector->magic = MAGIC;
	bc->log_sector->header_version = HEADER_VERSION;
	bc->log_sector->header_size = sizeof(*bc->log_sector);
	bc->log_sector->block_size = bc->block_size;
	bc->log_sector->count = 0;
	bc->log_sector->sequence = 0;

	/* Add log entry */
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static struct bow_range *find_sector0_current(struct bow_context *bc)
{
	struct bvec_iter bi_iter;

	bi_iter.bi_sector = bc->log_sector->sector0;
	bi_iter.bi_size = bc->block_size;
	return find_first_overlapping_range(&bc->ranges, &bi_iter);
}

/****** sysfs interface functions ******/

static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	enum state state, original_state;
	int ret;

	state = buf[0] - '0';
	if (state < TRIM || state > COMMITTED) {
		DMERR("State value %d out of range", state);
		return -EINVAL;
	}

	mutex_lock(&bc->ranges_lock);
	original_state = atomic_read(&bc->state);
	if (state != original_state + 1) {
		DMERR("Invalid state change from %d to %d",
		      original_state, state);
		ret = -EINVAL;
		goto bad;
	}

	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
	       : state == COMMITTED ? "Committed" : "Unknown");

	if (state == CHECKPOINT) {
		ret = prepare_log(bc);
		if (ret) {
			DMERR("Failed to switch to checkpoint state");
			goto bad;
		}
	} else if (state == COMMITTED) {
		struct bow_range *br = find_sector0_current(bc);
		struct bow_range *sector0_br =
			container_of(rb_first(&bc->ranges), struct bow_range,
				     node);

		ret = copy_data(bc, br, sector0_br, NULL);
		if (ret) {
			DMERR("Failed to switch to committed state");
			goto bad;
		}
	}
	atomic_inc(&bc->state);
	ret = count;

bad:
	mutex_unlock(&bc->ranges_lock);
	return ret;
}

static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	u64 trims_total;

	mutex_lock(&bc->ranges_lock);
	trims_total = bc->trims_total;
	mutex_unlock(&bc->ranges_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
}

static struct kobj_attribute attr_state = __ATTR_RW(state);
static struct kobj_attribute attr_free = __ATTR_RO(free);

static struct attribute *bow_attrs[] = {
	&attr_state.attr,
	&attr_free.attr,
	NULL
};

static struct kobj_type bow_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = bow_attrs,
	.release = dm_kobject_release
};

/****** constructor/destructor ******/

static void dm_bow_dtr(struct dm_target *ti)
{
	struct bow_context *bc = (struct bow_context *) ti->private;
	struct kobject *kobj;

	if (bc->workqueue)
		destroy_workqueue(bc->workqueue);
	if (bc->bufio)
		dm_bufio_client_destroy(bc->bufio);

	kobj = &bc->kobj_holder.kobj;
	if (kobj->state_initialized) {
		kobject_put(kobj);
		wait_for_completion(dm_get_completion_from_kobject(kobj));
	}

	mutex_lock(&bc->ranges_lock);
	while (rb_first(&bc->ranges)) {
		struct bow_range *br = container_of(rb_first(&bc->ranges),
					      struct bow_range, node);

		rb_erase(&br->node, &bc->ranges);
		kfree(br);
	}
	mutex_unlock(&bc->ranges_lock);

	mutex_destroy(&bc->ranges_lock);
	kfree(bc->log_sector);
	kfree(bc);
}

static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct bow_context *bc = ti->private;
	const unsigned int block_size = bc->block_size;

	limits->logical_block_size =
		max_t(unsigned int, limits->logical_block_size, block_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, block_size);
	limits->io_min = max_t(unsigned int, limits->io_min, block_size);

	if (limits->max_discard_sectors == 0) {
		limits->discard_granularity = 1 << 12;
		limits->max_hw_discard_sectors = 1 << 15;
		limits->max_discard_sectors = 1 << 15;
		bc->forward_trims = false;
	} else {
		limits->discard_granularity = 1 << 12;
		bc->forward_trims = true;
	}
}

static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};
	unsigned int opt_params;
	const char *opt_string;
	int err;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (err)
		return err;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (sscanf(opt_string, "block_size:%u%c",
					&bc->block_size, &dummy) == 1) {
			if (bc->block_size < SECTOR_SIZE ||
			    bc->block_size > 4096 ||
			    !is_power_of_2(bc->block_size)) {
				ti->error = "Invalid block_size";
				return -EINVAL;
			}
		} else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc;
	struct bow_range *br;
	int ret;

	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
	if (!bc) {
		ti->error = "Cannot allocate bow context";
		return -ENOMEM;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->private = bc;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &bc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	bc->block_size =
		bdev_get_queue(bc->dev->bdev)->limits.logical_block_size;
	if (argc > 1) {
		ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
		if (ret)
			goto bad;
	}

	bc->block_shift = ilog2(bc->block_size);
	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
	if (!bc->log_sector) {
		ti->error = "Cannot allocate log sector";
		ret = -ENOMEM;
		goto bad;
	}

	init_completion(&bc->kobj_holder.completion);
	mutex_init(&bc->ranges_lock);
	bc->ranges = RB_ROOT;
	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
					   NULL, NULL);
	if (IS_ERR(bc->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		ret = PTR_ERR(bc->bufio);
		bc->bufio = NULL;
		goto bad;
	}

	bc->workqueue = alloc_workqueue("dm-bow",
					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
					| WQ_UNBOUND, num_online_cpus());
	if (!bc->workqueue) {
		ti->error = "Cannot allocate workqueue";
		ret = -ENOMEM;
		goto bad;
	}

	INIT_LIST_HEAD(&bc->trimmed_list);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = ti->len;
	br->type = TOP;
	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
	rb_insert_color(&br->node, &bc->ranges);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = 0;
	br->type = UNCHANGED;
	rb_link_node(&br->node, bc->ranges.rb_node,
		     &bc->ranges.rb_node->rb_left);
	rb_insert_color(&br->node, &bc->ranges);

	ti->discards_supported = true;

	return 0;

bad:
	dm_bow_dtr(ti);
	return ret;
}
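
/*
 * Example table line (a sketch - the device path and size are illustrative,
 * not prescriptive):
 *
 *   dmsetup create bow --table \
 *     "0 <dev_sectors> bow /dev/block/by-name/userdata 1 block_size:4096"
 *
 * The single positional argument is the backing device; the optional
 * feature-argument count and block_size:<bytes> pair may be omitted, in
 * which case the backing device's logical block size is used.
 */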

void dm_bow_resume(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct bow_context *bc = ti->private;
	int ret;

	if (bc->kobj_holder.kobj.state_initialized)
		return;

	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
				   &disk_to_dev(dm_disk(md))->kobj, "%s",
				   "bow");
	if (ret)
		ti->error = "Cannot create sysfs node";
}
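
/*
 * The kobject above lands under the dm disk's sysfs directory, so driving
 * the state machine from userspace looks roughly like this (the dm-X name
 * depends on the system; a sketch, not a tested transcript):
 *
 *   echo 1 > /sys/block/dm-X/bow/state   # TRIM -> CHECKPOINT
 *   echo 2 > /sys/block/dm-X/bow/state   # CHECKPOINT -> COMMITTED
 *   cat /sys/block/dm-X/bow/free         # bytes of trimmed space available
 */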

/****** Handle writes ******/

static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
				   struct bvec_iter *bi_iter,
				   bool record_checksum)
{
	struct bow_range *backup_br;
	struct bvec_iter backup_bi;
	sector_t log_source, log_dest;
	unsigned int log_size;
	u32 checksum = 0;
	int ret;
	int original_type;
	sector_t sector0;

	/* Find a free range */
	backup_br = find_free_range(bc);
	if (!backup_br)
		return BLK_STS_NOSPC;

	/* Carve out a backup range. This may be smaller than the br given */
	backup_bi.bi_sector = backup_br->sector;
	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
	ret = split_range(bc, &backup_br, &backup_bi);
	if (ret)
		return ret;

	/*
	 * Carve out a changed range. This will not be smaller than the backup
	 * br since the backup br is smaller than the source range and iterator
	 */
	bi_iter->bi_size = backup_bi.bi_size;
	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	if (range_size(br) != range_size(backup_br)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Copy data over */
	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
	if (ret)
		return ret;

	/* Add an entry to the log */
	log_source = br->sector;
	log_dest = backup_br->sector;
	log_size = range_size(br);

	/*
	 * Set the types. Note that since set_type also amalgamates ranges
	 * we have to set both sectors to their final type before calling
	 * set_type on either
	 */
	original_type = br->type;
	sector0 = backup_br->sector;
	bc->trims_total -= range_size(backup_br);
	if (backup_br->type == TRIMMED)
		list_del(&backup_br->trimmed_list);
	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
						      : BACKUP;
	br->type = CHANGED;
	set_type(bc, &backup_br, backup_br->type);

	/*
	 * Add the log entry after marking the backup sector, since adding a log
	 * can cause another backup
	 */
	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
	if (ret) {
		br->type = original_type;
		return ret;
	}

	/* Now it is safe to mark this backup successful */
	if (original_type == SECTOR0_CURRENT)
		bc->log_sector->sector0 = sector0;

	set_type(bc, &br, br->type);
	return ret;
}

static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
			      struct bvec_iter *bi_iter)
{
	int ret;

	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	set_type(bc, &br, CHANGED);
	return BLK_STS_OK;
}

static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
				 struct bvec_iter *bi_iter)
{
	/* Nothing to do ... */
	return BLK_STS_OK;
}

static int prepare_one_range(struct bow_context *bc,
			     struct bvec_iter *bi_iter)
{
	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
							    bi_iter);
	switch (br->type) {
	case CHANGED:
		return prepare_changed_range(bc, br, bi_iter);

	case TRIMMED:
		return prepare_free_range(bc, br, bi_iter);

	case UNCHANGED:
	case BACKUP:
		return prepare_unchanged_range(bc, br, bi_iter, true);

	/*
	 * We cannot track the checksum for the active sector0, since it
	 * may change at any point.
	 */
	case SECTOR0_CURRENT:
		return prepare_unchanged_range(bc, br, bi_iter, false);

	case SECTOR0:	/* Handled in dm_bow_map */
	case TOP:	/* Illegal - top is off the end of the device */
	default:
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

struct write_work {
	struct work_struct work;
	struct bow_context *bc;
	struct bio *bio;
};

static void bow_write(struct work_struct *work)
{
	struct write_work *ww = container_of(work, struct write_work, work);
	struct bow_context *bc = ww->bc;
	struct bio *bio = ww->bio;
	struct bvec_iter bi_iter = bio->bi_iter;
	int ret = BLK_STS_OK;

	kfree(ww);

	mutex_lock(&bc->ranges_lock);
	do {
		ret = prepare_one_range(bc, &bi_iter);
		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;
	} while (!ret && bi_iter.bi_size);

	mutex_unlock(&bc->ranges_lock);

	if (!ret) {
		bio_set_dev(bio, bc->dev->bdev);
		submit_bio(bio);
	} else {
		DMERR("Write failure with error %d", -ret);
		bio->bi_status = ret;
		bio_endio(bio);
	}
}

static int queue_write(struct bow_context *bc, struct bio *bio)
{
	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
					| __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!ww) {
		DMERR("Failed to allocate write_work");
		return -ENOMEM;
	}

	INIT_WORK(&ww->work, bow_write);
	ww->bc = bc;
	ww->bio = bio;
	queue_work(bc->workqueue, &ww->work);
	return DM_MAPIO_SUBMITTED;
}

static int handle_sector0(struct bow_context *bc, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;

	if (bio->bi_iter.bi_size > bc->block_size) {
		struct bio *split = bio_split(bio,
					      bc->block_size >> SECTOR_SHIFT,
					      GFP_NOIO,
					      &fs_bio_set);
		if (!split) {
			DMERR("Failed to split bio");
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		bio_chain(split, bio);
		split->bi_iter.bi_sector = bc->log_sector->sector0;
		bio_set_dev(split, bc->dev->bdev);
		submit_bio(split);

		if (bio_data_dir(bio) == WRITE)
			ret = queue_write(bc, bio);
	} else {
		bio->bi_iter.bi_sector = bc->log_sector->sector0;
	}

	return ret;
}

static int add_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("add_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, TRIMMED);
			break;

		case TRIMMED:
			/* Nothing to do */
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;

	} while (bi_iter.bi_size);

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;
}

static int remove_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("remove_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			/* Nothing to do */
			break;

		case TRIMMED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, UNCHANGED);
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			  * SECTOR_SIZE;

	} while (bi_iter.bi_size);

	return DM_MAPIO_REMAPPED;
}

int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
{
	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	} else {
		bio_set_dev(bio, bc->dev->bdev);
		return DM_MAPIO_REMAPPED;
	}
}
/****** dm interface ******/

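/*
 * Mapping policy in brief: once COMMITTED, everything passes straight
 * through to the backing device. Before that, reads away from sector 0
 * also pass through; in TRIM state discards grow the free list and writes
 * shrink it; in CHECKPOINT state sector 0 is redirected to its live copy
 * and other writes are queued so the original data can be backed up first.
 */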
static int dm_bow_map(struct dm_target *ti, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;
	struct bow_context *bc = ti->private;

	if (likely(atomic_read(&bc->state) == COMMITTED))
		return remap_unless_illegal_trim(bc, bio);

	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
		return remap_unless_illegal_trim(bc, bio);

	if (atomic_read(&bc->state) != COMMITTED) {
		enum state state;

		mutex_lock(&bc->ranges_lock);
		state = atomic_read(&bc->state);
		if (state == TRIM) {
			if (bio_op(bio) == REQ_OP_DISCARD)
				ret = add_trim(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = remove_trim(bc, bio);
			else
				/* pass-through */;
		} else if (state == CHECKPOINT) {
			if (bio->bi_iter.bi_sector == 0)
				ret = handle_sector0(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = queue_write(bc, bio);
			else
				/* pass-through */;
		} else {
			/* pass-through */
		}
		mutex_unlock(&bc->ranges_lock);
	}

	if (ret == DM_MAPIO_REMAPPED)
		return remap_unless_illegal_trim(bc, bio);

	return ret;
}

static void dm_bow_tablestatus(struct dm_target *ti, char *result,
			       unsigned int maxlen)
{
	char *end = result + maxlen;
	struct bow_context *bc = ti->private;
	struct rb_node *i;
	int trimmed_list_length = 0;
	int trimmed_range_count = 0;
	struct bow_range *br;

	if (maxlen == 0)
		return;
	result[0] = 0;

	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
		if (br->type == TRIMMED) {
			++trimmed_list_length;
		} else {
			scnprintf(result, end - result,
				  "ERROR: non-trimmed entry in trimmed_list");
			return;
		}

	if (!rb_first(&bc->ranges)) {
		scnprintf(result, end - result, "ERROR: Empty ranges");
		return;
	}

	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
	    ->sector) {
		scnprintf(result, end - result,
			 "ERROR: First range does not start at sector 0");
		return;
	}

	mutex_lock(&bc->ranges_lock);
	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
		struct bow_range *br = container_of(i, struct bow_range, node);

		result += scnprintf(result, end - result, "%s: %llu",
				    readable_type[br->type],
				    (unsigned long long)br->sector);
		if (result >= end)
			goto unlock;

		result += scnprintf(result, end - result, "\n");
		if (result >= end)
			goto unlock;

		if (br->type == TRIMMED)
			++trimmed_range_count;

		if (br->type == TOP) {
			if (br->sector != ti->len) {
				scnprintf(result, end - result,
					 "\nERROR: Top sector is incorrect");
			}

			if (&br->node != rb_last(&bc->ranges)) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is not last");
			}

			break;
		}

		if (!rb_next(i)) {
			scnprintf(result, end - result,
				  "\nERROR: Last range not of type TOP");
			goto unlock;
		}

		if (br->sector > range_top(br)) {
			scnprintf(result, end - result,
				  "\nERROR: sectors out of order");
			goto unlock;
		}
	}

	if (trimmed_range_count != trimmed_list_length)
		scnprintf(result, end - result,
			  "\nERROR: not all trimmed ranges in trimmed list");

unlock:
	mutex_unlock(&bc->ranges_lock);
}

static void dm_bow_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result,
			  unsigned int maxlen)
{
	switch (type) {
	case STATUSTYPE_INFO:
	case STATUSTYPE_IMA:
		if (maxlen)
			result[0] = 0;
		break;

	case STATUSTYPE_TABLE:
		dm_bow_tablestatus(ti, result, maxlen);
		break;
	}
}

int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct bow_context *bc = ti->private;
	struct dm_dev *dev = bc->dev;

	*bdev = dev->bdev;
	/* Only pass ioctls through if the device sizes match exactly. */
	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int dm_bow_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct bow_context *bc = ti->private;

	return fn(ti, bc->dev, 0, ti->len, data);
}

static struct target_type bow_target = {
	.name   = "bow",
	.version = {1, 2, 0},
	.features = DM_TARGET_PASSES_CRYPTO,
	.module = THIS_MODULE,
	.ctr    = dm_bow_ctr,
	.resume = dm_bow_resume,
	.dtr    = dm_bow_dtr,
	.map    = dm_bow_map,
	.status = dm_bow_status,
	.prepare_ioctl  = dm_bow_prepare_ioctl,
	.iterate_devices = dm_bow_iterate_devices,
	.io_hints = dm_bow_io_hints,
};

int __init dm_bow_init(void)
{
	int r = dm_register_target(&bow_target);

	if (r < 0)
		DMERR("registering bow failed %d", r);
	return r;
}

void dm_bow_exit(void)
{
	dm_unregister_target(&bow_target);
}

MODULE_LICENSE("GPL");

module_init(dm_bow_init);
module_exit(dm_bow_exit);