/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned"

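/*
 * Minimum number of BIOs reserved in the bio set used to clone target BIOs
 * for submission to the backing zoned device (see dmz_submit_bio()).
 */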
#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_target	*target;
	struct dm_zone		*zone;
	struct bio		*bio;
	atomic_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	atomic_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		*ddev;

	unsigned long		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata     *metadata;

	/* For reclaim */
	struct dmz_reclaim	*reclaim;

	/* For chunk work */
	struct mutex		chunk_lock;
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;

	/* For cloned BIOs to zones */
	struct bio_set		*bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Periodic flush work delay: dirty metadata is flushed every 10 seconds
 * (value in jiffies).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	if (bio->bi_status != BLK_STS_OK)
		bioctx->target->dev->flags |= DMZ_CHECK_BDEV;

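	/*
	 * The target BIO completes only when the last reference on its
	 * context is dropped, that is, once all of its clones have completed.
	 */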
	if (atomic_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dmz->dev->bdev);
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

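	/*
	 * Submit the clone. The reference taken on the BIO context here is
	 * dropped by dmz_clone_endio() when the clone completes.
	 */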
	atomic_inc(&bioctx->ref);
	generic_make_request(clone);

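	/* A write to a sequential zone advances the tracked write pointer */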
	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/*
	 * Temporarily limit the BIO size to the discarded blocks so that
	 * zero_fill_bio() zeroes only nr_blocks blocks of the BIO buffer.
	 */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* A read of an unmapped chunk only needs to zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
		/*
		 * zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      dmz_id(zmd, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	/*
	 * Write may trigger a zone allocation. So make sure the
	 * allocation can succeed.
	 */
	if (bio_op(bio) == REQ_OP_WRITE)
		dmz_schedule_reclaim(dmz->reclaim);

	dmz_lock_metadata(zmd);

	if (dmz->dev->flags & DMZ_BDEV_DYING) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
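		/*
		 * Keep the zone active while the BIO is being processed;
		 * it is deactivated in dmz_bio_endio() when the last
		 * reference on the BIO context is dropped.
		 */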
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
			    bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference count.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	atomic_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (atomic_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

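	/* Rearm the periodic flush work */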
	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (!cw) {

		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		atomic_set(&cw->refcount, 0);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);
	dmz_get_chunk_work(cw);

	dmz_reclaim_bio_acc(dmz->reclaim);
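	/*
	 * If the work was not already pending, queueing it takes an extra
	 * reference that dmz_chunk_work() drops once it is done.
	 */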
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_bdev_is_dying(dmz->dev))
		return DM_MAPIO_KILL;

	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		      bio_op(bio), (unsigned long long)sector, nr_sectors,
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
		      (unsigned int)dmz_bio_blocks(bio));

	bio_set_dev(bio, dev->bdev);

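	/* An empty BIO that is not a flush (zero-sector write) needs no processing */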
	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->target = dmz;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	atomic_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dev->zone_nr_sectors - 1);
	if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		dmz_dev_debug(dmz->dev,
			      "BIO op %d, can't process chunk %llu, err %i\n",
			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
			      ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
	struct dmz_target *dmz = ti->private;
	struct request_queue *q;
	struct dmz_dev *dev;
	sector_t aligned_capacity;
	int ret;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
	if (ret) {
		ti->error = "Get target device failed";
		dmz->ddev = NULL;
		return ret;
	}

	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	dev->bdev = dmz->ddev->bdev;
	(void)bdevname(dev->bdev, dev->name);

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
		ti->error = "Not a zoned block device";
		ret = -EINVAL;
		goto err;
	}

	q = bdev_get_queue(dev->bdev);
	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
	aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
	if (ti->begin ||
	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
		ti->error = "Partial mapping not supported";
		ret = -EINVAL;
		goto err;
	}

	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
	dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

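	/* Round up to account for a possibly smaller last zone */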
	dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
		>> dev->zone_nr_sectors_shift;

	dmz->dev = dev;

	return 0;
err:
	dm_put_device(ti, dmz->ddev);
	kfree(dev);

	return ret;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	dm_put_device(ti, dmz->ddev);
	kfree(dmz->dev);
	dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	struct dmz_dev *dev;
	int ret;

	/* Check arguments */
	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	ti->private = dmz;

	/* Get the target zoned block device */
	ret = dmz_get_zoned_device(ti, argv[0]);
	if (ret) {
		dmz->ddev = NULL;
		goto err;
	}

	/* Initialize metadata */
	dev = dmz->dev;
	ret = dmz_ctr_metadata(dev, &dmz->metadata);
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support). max_io_len is in 512B sectors */
	ti->max_io_len = dev->zone_nr_sectors;
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;
	ti->split_discard_bios = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

	/* Zone BIO */
	dmz->bio_set = bioset_create(DMZ_MIN_BIOS, 0, 0);
	if (!dmz->bio_set) {
		ti->error = "Create BIO set failed";
		ret = -ENOMEM;
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
					0, dev->name);
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dev->name);
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
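	/* Start the periodic metadata flush */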
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
	if (ret) {
		ti->error = "Zone reclaim initialization failed";
		goto err_fwq;
	}

	dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
		     (unsigned long long)ti->len,
		     (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	bioset_free(dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz);

	return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	dmz_dtr_reclaim(dmz->reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_free(dmz->bio_set);

	dmz_put_zoned_device(ti);

	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti,
			     struct block_device **bdev, fmode_t *mode)
{
	struct dmz_target *dmz = ti->private;

	if (!dmz_check_bdev(dmz->dev))
		return -EIO;

	*bdev = dmz->dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	dmz_suspend_reclaim(dmz->reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	dmz_resume_reclaim(dmz->reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
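	/* Only report the zone-aligned part of the device capacity */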
	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

	return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {1, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");