• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2003 Sistina Software (UK) Limited.
4  * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include <linux/device-mapper.h>
10 
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/blkdev.h>
14 #include <linux/bio.h>
15 #include <linux/slab.h>
16 
/* Log-message prefix used by the DM* logging macros for this target. */
#define DM_MSG_PREFIX "flakey"

/* Denominator for the random_read_corrupt/random_write_corrupt probabilities. */
#define PROBABILITY_BASE	1000000000

/* True iff every flag in fc->corrupt_bio_flags is also set in the bio. */
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
23 
/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;		/* underlying device */
	unsigned long start_time;	/* jiffies at construction; up/down phase origin */
	sector_t start;			/* sector offset into the underlying device */
	unsigned int up_interval;	/* seconds of normal behaviour per cycle */
	unsigned int down_interval;	/* seconds of simulated failure per cycle */
	unsigned long flags;		/* bitmask of enum feature_flag_bits */
	unsigned int corrupt_bio_byte;	/* 1-based byte to corrupt; 0 = disabled */
	unsigned int corrupt_bio_rw;	/* READ or WRITE: direction to corrupt */
	unsigned int corrupt_bio_value;	/* value written over the chosen byte */
	blk_opf_t corrupt_bio_flags;	/* only corrupt bios with all these op flags */
	unsigned int random_read_corrupt;  /* read-corrupt chance out of PROBABILITY_BASE */
	unsigned int random_write_corrupt; /* write-corrupt chance out of PROBABILITY_BASE */
};

/* Bit numbers for flakey_c::flags, set while parsing feature arguments. */
enum feature_flag_bits {
	ERROR_READS,
	DROP_WRITES,
	ERROR_WRITES
};

/* Per-bio state recorded in flakey_map() and consumed in flakey_end_io(). */
struct per_bio_data {
	bool bio_can_corrupt;		/* bio carried data during the down interval */
	struct bvec_iter saved_iter;	/* bio iterator snapshot taken at map time */
};
53 
/*
 * Parse the optional feature arguments of a flakey table line into @fc.
 *
 * On failure sets ti->error and returns a negative errno.  If no failure
 * behaviour is selected at all, defaults to erroring both reads and writes
 * during the down interval (see error_all_io below).
 */
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r = 0;
	unsigned int argc = 0;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 11, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
		{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
	};

	/* Read the leading <#feature args> count, if any arguments remain. */
	if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
		return r;

	/* No feature arguments supplied. */
	if (!argc)
		goto error_all_io;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * error_reads
		 */
		if (!strcasecmp(arg_name, "error_reads")) {
			if (test_and_set_bit(ERROR_READS, &fc->flags)) {
				ti->error = "Feature error_reads duplicated";
				return -EINVAL;
			}
			continue;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			/* <Nth_byte>: 1-based position of the byte to corrupt. */
			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			/* dm_read_arg() parses into an unsigned int; blk_opf_t must match. */
			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
				     sizeof(unsigned int));
			r = dm_read_arg(_args + 3, as,
				(__force unsigned int *)&fc->corrupt_bio_flags,
				&ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		/* random_read_corrupt <probability out of PROBABILITY_BASE> */
		if (!strcasecmp(arg_name, "random_read_corrupt")) {
			if (!argc) {
				ti->error = "Feature random_read_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_read_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		/* random_write_corrupt <probability out of PROBABILITY_BASE> */
		if (!strcasecmp(arg_name, "random_write_corrupt")) {
			if (!argc) {
				ti->error = "Feature random_write_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_write_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	/* Corrupting writes is incompatible with dropping or erroring them. */
	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	/*
	 * No behaviour selected at all: fall back to erroring every read
	 * and write while the device is down.  Note the label is jumped to
	 * from the "no feature arguments" case above.
	 */
	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
	    !fc->random_read_corrupt && !fc->random_write_corrupt) {
error_all_io:
		set_bit(ERROR_WRITES, &fc->flags);
		set_bit(ERROR_READS, &fc->flags);
	}

	return 0;
}
227 
/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [error_reads]
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *     [random_read_corrupt <probability>]
 *     [random_write_corrupt <probability>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 */
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	/* At minimum: <dev_path> <offset> <up interval> <down interval>. */
	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	/* <offset>: must parse fully and fit in sector_t. */
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	/* Unsigned wrap-around check for the cycle length. */
	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}
318 
flakey_dtr(struct dm_target * ti)319 static void flakey_dtr(struct dm_target *ti)
320 {
321 	struct flakey_c *fc = ti->private;
322 
323 	dm_put_device(ti, fc->dev);
324 	kfree(fc);
325 }
326 
/* Translate a target-relative sector into a sector on the underlying device. */
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *flakey = ti->private;

	return dm_target_offset(ti, bi_sector) + flakey->start;
}
333 
flakey_map_bio(struct dm_target * ti,struct bio * bio)334 static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
335 {
336 	struct flakey_c *fc = ti->private;
337 
338 	bio_set_dev(bio, fc->dev->bdev);
339 	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
340 }
341 
/*
 * Overwrite byte @corrupt_bio_byte (0-based, relative to iterator @start) of
 * @bio's data with @corrupt_bio_value.  If the bio's data is shorter than the
 * requested offset, nothing is written.
 */
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
			       unsigned char corrupt_bio_value,
			       struct bvec_iter start)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	__bio_for_each_segment(bvec, bio, iter, start) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			/* Target byte lies within this segment. */
			unsigned char *segment = bvec_kmap_local(&bvec);
			segment[corrupt_bio_byte] = corrupt_bio_value;
			kunmap_local(segment);
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, corrupt_bio_value, corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)start.bi_sector,
				start.bi_size);
			break;
		}
		/* Skip past this whole segment and keep counting down. */
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}
369 
corrupt_bio_data(struct bio * bio,struct flakey_c * fc,struct bvec_iter start)370 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
371 			     struct bvec_iter start)
372 {
373 	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
374 
375 	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
376 }
377 
/* Corrupt one randomly chosen byte of @bio with a random value. */
static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
	unsigned int offset;
	unsigned char value;

	offset = get_random_u32() % start.bi_size;
	value = get_random_u8();

	corrupt_bio_common(bio, offset, value, start);
}
388 
clone_free(struct bio * clone)389 static void clone_free(struct bio *clone)
390 {
391 	struct folio_iter fi;
392 
393 	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
394 		bio_for_each_folio_all(fi, clone)
395 			folio_put(fi.folio);
396 	}
397 
398 	bio_uninit(clone);
399 	kfree(clone);
400 }
401 
clone_endio(struct bio * clone)402 static void clone_endio(struct bio *clone)
403 {
404 	struct bio *bio = clone->bi_private;
405 	bio->bi_status = clone->bi_status;
406 	clone_free(clone);
407 	bio_endio(bio);
408 }
409 
/*
 * Build a private write bio that carries a copy of @bio's data in freshly
 * allocated pages, so the copy can be corrupted without touching the
 * caller's buffers.  If @bio exceeds UIO_MAXIOV pages it is first split via
 * dm_accept_partial_bio().  Returns NULL on allocation failure; the clone
 * completes through clone_endio()/clone_free().
 */
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
{
	struct bio *clone;
	unsigned size, remaining_size, nr_iovecs, order;
	struct bvec_iter iter = bio->bi_iter;

	/* Cap the bio so the clone's data fits in UIO_MAXIOV page vectors. */
	if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
		dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);

	size = bio->bi_iter.bi_size;
	nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	clone = bio_kmalloc(nr_iovecs, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	if (!clone)
		return NULL;

	bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);

	clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
	clone->bi_private = bio;
	clone->bi_end_io = clone_endio;

	remaining_size = size;

	/* Try large page orders first, falling back on allocation failure. */
	order = MAX_PAGE_ORDER;
	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add, to_copy;
		unsigned char *virt;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		order = min(order, remaining_order);

retry_alloc_pages:
		pages = alloc_pages(GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, order);
		if (unlikely(!pages)) {
			if (order) {
				/* Retry with a smaller order before giving up. */
				order--;
				goto retry_alloc_pages;
			}
			clone_free(clone);
			return NULL;
		}
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);

		/* Copy the source bio's segments into this page run. */
		virt = page_to_virt(pages);
		to_copy = size_to_add;
		do {
			struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
			unsigned this_step = min(bvec.bv_len, to_copy);
			void *map = bvec_kmap_local(&bvec);
			memcpy(virt, map, this_step);
			kunmap_local(map);

			bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
			to_copy -= this_step;
			virt += this_step;
		} while (to_copy);

		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	return clone;
}
474 
/*
 * Map a bio.  During the up interval (and for zone management ops) the bio
 * is simply remapped to the underlying device.  During the down interval,
 * the configured failure mode applies: error reads, drop or error writes,
 * or submit a corrupted private clone of a write's data.
 */
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned int elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_can_corrupt = false;

	/* Zone management commands pass through untouched. */
	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive ? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		bool corrupt_fixed, corrupt_random;

		/* Snapshot the iterator so end_io can corrupt the read data. */
		if (bio_has_data(bio)) {
			pb->bio_can_corrupt = true;
			pb->saved_iter = bio->bi_iter;
		}

		/*
		 * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (test_bit(ERROR_READS, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			/* Complete the write without issuing it (silent drop). */
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/* Writes without data (e.g. flushes) cannot be corrupted. */
		if (!pb->bio_can_corrupt)
			goto map_bio;
		/*
		 * Corrupt matching writes.
		 */
		corrupt_fixed = false;
		corrupt_random = false;
		if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_fixed = true;
		}
		if (fc->random_write_corrupt) {
			/* Corrupt with probability random_write_corrupt/PROBABILITY_BASE. */
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_write_corrupt)
				corrupt_random = true;
		}
		if (corrupt_fixed || corrupt_random) {
			/* Corrupt a private copy so the caller's pages stay intact. */
			struct bio *clone = clone_bio(ti, fc, bio);
			if (clone) {
				if (corrupt_fixed)
					corrupt_bio_data(clone, fc,
							 clone->bi_iter);
				if (corrupt_random)
					corrupt_bio_random(clone,
							   clone->bi_iter);
				submit_bio(clone);
				return DM_MAPIO_SUBMITTED;
			}
			/* Clone failed: fall through and map uncorrupted. */
		}
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}
554 
/*
 * Completion hook.  Successful READs that were mapped during the down
 * interval may be corrupted here (fixed byte and/or a random byte) or
 * turned into errors, using the iterator snapshot saved by flakey_map().
 */
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	/* Zone management commands are never modified. */
	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc, pb->saved_iter);
			}
		}
		if (fc->random_read_corrupt) {
			/* Corrupt with probability random_read_corrupt/PROBABILITY_BASE. */
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_read_corrupt)
				corrupt_bio_random(bio, pb->saved_iter);
		}
		if (test_bit(ERROR_READS, &fc->flags)) {
			/*
			 * Error read during the down_interval if drop_writes
			 * and error_writes were not configured.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}
591 
/*
 * Report target status.  INFO and IMA emit nothing; TABLE re-emits the
 * constructor arguments so the table line can be round-tripped.
 */
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;	/* output cursor used by DMEMIT */
	struct flakey_c *fc = ti->private;
	unsigned int error_reads, drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		error_reads = test_bit(ERROR_READS, &fc->flags);
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		/*
		 * Feature-argument count: one word per boolean flag, five
		 * words for corrupt_bio_byte, two for each random_* option.
		 */
		DMEMIT(" %u", error_reads + drop_writes + error_writes +
			(fc->corrupt_bio_byte > 0) * 5 +
			(fc->random_read_corrupt > 0) * 2 +
			(fc->random_write_corrupt > 0) * 2);

		if (error_reads)
			DMEMIT(" error_reads");
		/* drop_writes and error_writes are mutually exclusive. */
		if (drop_writes)
			DMEMIT(" drop_writes");
		else if (error_writes)
			DMEMIT(" error_writes");

		if (fc->corrupt_bio_byte)
			DMEMIT(" corrupt_bio_byte %u %c %u %u",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		if (fc->random_read_corrupt > 0)
			DMEMIT(" random_read_corrupt %u", fc->random_read_corrupt);
		if (fc->random_write_corrupt > 0)
			DMEMIT(" random_write_corrupt %u", fc->random_write_corrupt);

		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}
642 
flakey_prepare_ioctl(struct dm_target * ti,struct block_device ** bdev)643 static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
644 {
645 	struct flakey_c *fc = ti->private;
646 
647 	*bdev = fc->dev->bdev;
648 
649 	/*
650 	 * Only pass ioctls through if the device sizes match exactly.
651 	 */
652 	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
653 		return 1;
654 	return 0;
655 }
656 
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Forward zone reporting to the underlying device, translating sectors
 * through this target's offset.
 */
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;

	return dm_report_zones(fc->dev->bdev, fc->start,
			       flakey_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define flakey_report_zones NULL
#endif
670 
/* Invoke @fn on the single underlying device this target maps onto. */
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *flakey = ti->private;

	return fn(ti, flakey->dev, flakey->start, ti->len, data);
}
677 
/* Device-mapper registration for the "flakey" target. */
static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = flakey_report_zones,
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
module_dm(flakey);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
697