// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "dm-core.h"

#define DM_MSG_PREFIX "zone"

#define DM_ZONE_INVALID_WP_OFST UINT_MAX

/*
 * For internal zone reports bypassing the top BIO submission path.
 */
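/*
 * Returns the number of zones reported, or a negative errno. The report may
 * stop early if the device capacity is reached before nr_zones zones.
 */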
static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
                                  sector_t sector, unsigned int nr_zones,
                                  report_zones_cb cb, void *data)
{
        struct gendisk *disk = md->disk;
        int ret;
        struct dm_report_zones_args args = {
                .next_sector = sector,
                .orig_data = data,
                .orig_cb = cb,
        };

        do {
                struct dm_target *tgt;

                tgt = dm_table_find_target(t, args.next_sector);
                if (WARN_ON_ONCE(!tgt->type->report_zones))
                        return -EIO;

                args.tgt = tgt;
                ret = tgt->type->report_zones(tgt, &args,
                                              nr_zones - args.zone_idx);
                if (ret < 0)
                        return ret;
        } while (args.zone_idx < nr_zones &&
                 args.next_sector < get_capacity(disk));

        return args.zone_idx;
}

/*
 * User-facing block device report zones operation of a DM device. This calls
 * the report_zones operation of each target of the device table. Targets
 * generally implement this operation using dm_report_zones().
 */
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct mapped_device *md = disk->private_data;
        struct dm_table *map;
        int srcu_idx, ret;

        if (dm_suspended_md(md))
                return -EAGAIN;

        map = dm_get_live_table(md, &srcu_idx);
        if (!map)
                return -EIO;

        ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);

        dm_put_live_table(md, srcu_idx);

        return ret;
}

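/*
 * Callback used by dm_report_zones() to remap the zone descriptors reported
 * by a target backing device to the zone layout of the mapped device.
 */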
static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
                              void *data)
{
        struct dm_report_zones_args *args = data;
        sector_t sector_diff = args->tgt->begin - args->start;

        /*
         * Ignore zones beyond the target range.
         */
        if (zone->start >= args->start + args->tgt->len)
                return 0;

        /*
         * Remap the start sector and write pointer position of the zone
         * to match its position in the target range.
         */
        zone->start += sector_diff;
        if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
                if (zone->cond == BLK_ZONE_COND_FULL)
                        zone->wp = zone->start + zone->len;
                else if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->wp = zone->start;
                else
                        zone->wp += sector_diff;
        }

        args->next_sector = zone->start + zone->len;
        return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}

/*
 * Helper for drivers of zoned targets to implement struct target_type
 * report_zones operation.
 */
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
                    struct dm_report_zones_args *args, unsigned int nr_zones)
{
        /*
         * Set the target mapping start sector first so that
         * dm_report_zones_cb() can correctly remap zone information.
         */
        args->start = start;

        return blkdev_report_zones(bdev, sector, nr_zones,
                                   dm_report_zones_cb, args);
}
EXPORT_SYMBOL_GPL(dm_report_zones);
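
/*
 * As an illustration only, a linear-style target could wire its report_zones
 * operation to dm_report_zones() roughly as follows. This is a minimal
 * sketch: the example_c context, its dev and start fields, and the function
 * name are hypothetical and not part of this file (see drivers/md/dm-linear.c
 * for a real implementation):
 *
 *        static int example_report_zones(struct dm_target *ti,
 *                        struct dm_report_zones_args *args,
 *                        unsigned int nr_zones)
 *        {
 *                struct example_c *ec = ti->private;
 *                sector_t sector = ec->start +
 *                        dm_target_offset(ti, args->next_sector);
 *
 *                return dm_report_zones(ec->dev->bdev, ec->start, sector,
 *                                       args, nr_zones);
 *        }
 */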
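
/*
 * Check if a BIO is a write operation that modifies user data, and so may
 * change the write pointer position of a sequential zone.
 */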
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
        struct request_queue *q = md->queue;

        if (!blk_queue_is_zoned(q))
                return false;

        switch (bio_op(bio)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
                return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
        default:
                return false;
        }
}

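/*
 * Free the resources used for zone append emulation: the conventional zone
 * bitmap, the sequential zone write lock bitmap and the zone write pointer
 * offset array.
 */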
void dm_cleanup_zoned_dev(struct mapped_device *md)
{
        if (md->disk) {
                bitmap_free(md->disk->conv_zones_bitmap);
                md->disk->conv_zones_bitmap = NULL;
                bitmap_free(md->disk->seq_zones_wlock);
                md->disk->seq_zones_wlock = NULL;
        }

        kvfree(md->zwp_offset);
        md->zwp_offset = NULL;
        md->nr_zones = 0;
}

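/*
 * Return the write pointer offset of a zone, relative to the zone start,
 * in units of 512B sectors.
 */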
static unsigned int dm_get_zone_wp_offset(struct blk_zone *zone)
{
        switch (zone->cond) {
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_EXP_OPEN:
        case BLK_ZONE_COND_CLOSED:
                return zone->wp - zone->start;
        case BLK_ZONE_COND_FULL:
                return zone->len;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_NOT_WP:
        case BLK_ZONE_COND_OFFLINE:
        case BLK_ZONE_COND_READONLY:
        default:
                /*
                 * Conventional, offline and read-only zones do not have a
                 * valid write pointer. Use 0, as for an empty zone.
                 */
                return 0;
        }
}

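/*
 * Callback for dm_blk_do_report_zones() used during revalidation: allocate
 * on first use the zone bitmaps and the write pointer offset array of the
 * mapped device, and initialize the entries for the reported zone.
 */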
static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
                                 void *data)
{
        struct mapped_device *md = data;
        struct gendisk *disk = md->disk;

        switch (zone->type) {
        case BLK_ZONE_TYPE_CONVENTIONAL:
                if (!disk->conv_zones_bitmap) {
                        disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones,
                                                                GFP_NOIO);
                        if (!disk->conv_zones_bitmap)
                                return -ENOMEM;
                }
                set_bit(idx, disk->conv_zones_bitmap);
                break;
        case BLK_ZONE_TYPE_SEQWRITE_REQ:
        case BLK_ZONE_TYPE_SEQWRITE_PREF:
                if (!disk->seq_zones_wlock) {
                        disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones,
                                                              GFP_NOIO);
                        if (!disk->seq_zones_wlock)
                                return -ENOMEM;
                }
                if (!md->zwp_offset) {
                        md->zwp_offset =
                                kvcalloc(disk->nr_zones, sizeof(unsigned int),
                                         GFP_KERNEL);
                        if (!md->zwp_offset)
                                return -ENOMEM;
                }
                md->zwp_offset[idx] = dm_get_zone_wp_offset(zone);

                break;
        default:
                DMERR("Invalid zone type 0x%x at sector %llu",
                      (int)zone->type, zone->start);
                return -ENODEV;
        }

        return 0;
}

/*
 * Revalidate the zones of a mapped device to initialize the resources
 * necessary for zone append emulation. Note that we cannot simply use the
 * block layer blk_revalidate_disk_zones() function here as the mapped device
 * is suspended (this is called from __bind() context).
 */
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
        struct gendisk *disk = md->disk;
        unsigned int noio_flag;
        int ret;

        /*
         * Check if something changed. If yes, cleanup the current resources
         * and reallocate everything.
         */
        if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
                dm_cleanup_zoned_dev(md);
        if (md->nr_zones)
                return 0;

        /*
         * Scan all zones to initialize everything. Ensure that all vmalloc
         * operations in this context are done as if GFP_NOIO was specified.
         */
        noio_flag = memalloc_noio_save();
        ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
                                     dm_zone_revalidate_cb, md);
        memalloc_noio_restore(noio_flag);
        if (ret < 0)
                goto err;
        if (ret != disk->nr_zones) {
                ret = -EIO;
                goto err;
        }

        md->nr_zones = disk->nr_zones;

        return 0;

err:
        DMERR("Revalidate zones failed %d", ret);
        dm_cleanup_zoned_dev(md);
        return ret;
}
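
/*
 * iterate_devices callback checking if a target device cannot natively
 * handle zone append, that is, if the device is not a zoned block device.
 */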
static int device_not_zone_append_capable(struct dm_target *ti,
                                          struct dm_dev *dev, sector_t start,
                                          sector_t len, void *data)
{
        return !bdev_is_zoned(dev->bdev);
}

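/*
 * Check if all the targets of a table and all their devices natively support
 * REQ_OP_ZONE_APPEND, in which case zone append emulation is not needed.
 */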
static bool dm_table_supports_zone_append(struct dm_table *t)
{
        for (unsigned int i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = dm_table_get_target(t, i);

                if (ti->emulate_zone_append)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, device_not_zone_append_capable, NULL))
                        return false;
        }

        return true;
}

int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
{
        struct mapped_device *md = t->md;

        /*
         * For a zoned target, the number of zones should be updated for the
         * correct value to be exposed in sysfs queue/nr_zones.
         */
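        /* Zoned targets are supported only with bio-based mapped devices. */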
        WARN_ON_ONCE(queue_is_mq(q));
        md->disk->nr_zones = bdev_nr_zones(md->disk->part0);

        /* Check if zone append is natively supported */
        if (dm_table_supports_zone_append(t)) {
                clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
                dm_cleanup_zoned_dev(md);
                return 0;
        }

        /*
         * Mark the mapped device as needing zone append emulation and
         * initialize the emulation resources once the capacity is set.
         */
        set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
        if (!get_capacity(md->disk))
                return 0;

        return dm_revalidate_zones(md, t);
}
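
/*
 * Callback for dm_blk_do_report_zones(): store the write pointer offset of
 * the single reported zone.
 */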
static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
                                       void *data)
{
        unsigned int *wp_offset = data;

        *wp_offset = dm_get_zone_wp_offset(zone);

        return 0;
}
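
/*
 * Refresh the cached write pointer offset of a zone with a report of that
 * single zone. This is used to recover a valid offset after a failed write
 * left the zone in an unknown state.
 */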
static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
                                    unsigned int *wp_ofst)
{
        sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
        unsigned int noio_flag;
        struct dm_table *t;
        int srcu_idx, ret;

        t = dm_get_live_table(md, &srcu_idx);
        if (!t)
                return -EIO;

        /*
         * Ensure that all memory allocations in this context are done as if
         * GFP_NOIO was specified.
         */
        noio_flag = memalloc_noio_save();
        ret = dm_blk_do_report_zones(md, t, sector, 1,
                                     dm_update_zone_wp_offset_cb, wp_ofst);
        memalloc_noio_restore(noio_flag);

        dm_put_live_table(md, srcu_idx);

        if (ret != 1)
                return -EIO;

        return 0;
}
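
/*
 * Operation and size of the original BIO, saved before the clone BIO is
 * remapped, so that dm_zone_map_bio_end() can update the zone write pointer
 * offset once the target has mapped the clone.
 */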
struct orig_bio_details {
        enum req_op op;
        unsigned int nr_sectors;
};

/*
 * First phase of BIO mapping for targets with zone append emulation:
 * check all BIOs that change a zone write pointer and turn zone append
 * operations into regular write operations.
 */
static bool dm_zone_map_bio_begin(struct mapped_device *md,
                                  unsigned int zno, struct bio *clone)
{
        sector_t zsectors = bdev_zone_sectors(md->disk->part0);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);

        /*
         * If the target zone is in an error state, recover by inspecting the
         * zone to get its current write pointer position. Note that since the
         * target zone is already locked, a BIO issuing context should never
         * see the zone write in the DM_ZONE_UPDATING_WP_OFST state.
         */
        if (zwp_offset == DM_ZONE_INVALID_WP_OFST) {
                if (dm_update_zone_wp_offset(md, zno, &zwp_offset))
                        return false;
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
        }

        switch (bio_op(clone)) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
                return true;
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
                /* Writes must be aligned to the zone write pointer */
                if (bdev_offset_from_zone_start(md->disk->part0,
                                                clone->bi_iter.bi_sector) != zwp_offset)
                        return false;
                break;
        case REQ_OP_ZONE_APPEND:
                /*
                 * Change a zone append operation into a non-mergeable regular
                 * write directed at the current write pointer position of the
                 * target zone.
                 */
                clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
                        (clone->bi_opf & (~REQ_OP_MASK));
                clone->bi_iter.bi_sector += zwp_offset;
                break;
        default:
                DMWARN_LIMIT("Invalid BIO operation");
                return false;
        }

        /* Cannot write to a full zone */
        if (zwp_offset >= zsectors)
                return false;

        return true;
}

/*
 * Second phase of BIO mapping for targets with zone append emulation:
 * update the zone write pointer offset array to account for the additional
 * data written to a zone. Note that at this point, the remapped clone BIO
 * may already have completed, so we do not touch it.
 */
static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
                                        struct orig_bio_details *orig_bio_details,
                                        unsigned int nr_sectors)
{
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);

        /* The clone BIO may already have been completed and failed */
        if (zwp_offset == DM_ZONE_INVALID_WP_OFST)
                return BLK_STS_IOERR;

        /* Update the zone wp offset */
        switch (orig_bio_details->op) {
        case REQ_OP_ZONE_RESET:
                WRITE_ONCE(md->zwp_offset[zno], 0);
                return BLK_STS_OK;
        case REQ_OP_ZONE_FINISH:
                WRITE_ONCE(md->zwp_offset[zno],
                           bdev_zone_sectors(md->disk->part0));
                return BLK_STS_OK;
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
                return BLK_STS_OK;
        case REQ_OP_ZONE_APPEND:
                /*
                 * Check that the target did not truncate the write operation
                 * emulating a zone append.
                 */
                if (nr_sectors != orig_bio_details->nr_sectors) {
                        DMWARN_LIMIT("Truncated write for zone append");
                        return BLK_STS_IOERR;
                }
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
                return BLK_STS_OK;
        default:
                DMWARN_LIMIT("Invalid BIO operation");
                return BLK_STS_IOERR;
        }
}
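
/*
 * Per-zone write locking: a bit in the gendisk seq_zones_wlock bitmap
 * serializes the BIOs that modify the write pointer of a sequential zone,
 * keeping the cached write pointer offsets consistent. The lock is taken
 * when a BIO is mapped and normally released when the clone BIO completes.
 */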
static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
                                struct bio *clone)
{
        if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
                return;

        wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
        bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
}

static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
                                  struct bio *clone)
{
        if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
                return;

        WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
        clear_bit_unlock(zno, disk->seq_zones_wlock);
        smp_mb__after_atomic();
        wake_up_bit(disk->seq_zones_wlock, zno);

        bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}

static bool dm_need_zone_wp_tracking(struct bio *bio)
{
        /*
         * Special processing is not needed for operations that do not need the
         * zone write lock, that is, all operations that target conventional
         * zones and all operations that do not directly modify a sequential
         * zone write pointer.
         */
        if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;
        switch (bio_op(bio)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_APPEND:
                return bio_zone_is_seq(bio);
        default:
                return false;
        }
}

/*
 * Special IO mapping for targets needing zone append emulation.
 */
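/*
 * Mapping is done in three steps: dm_zone_map_bio_begin() checks the BIO
 * against the zone write pointer and rewrites zone append operations into
 * regular writes, ti->type->map() lets the target remap or submit the clone,
 * and dm_zone_map_bio_end() advances the cached write pointer offset when
 * the mapping succeeded.
 */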
int dm_zone_map_bio(struct dm_target_io *tio)
{
        struct dm_io *io = tio->io;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = io->md;
        struct bio *clone = &tio->clone;
        struct orig_bio_details orig_bio_details;
        unsigned int zno;
        blk_status_t sts;
        int r;

        /*
         * IOs that do not change a zone write pointer do not need
         * any additional special processing.
         */
        if (!dm_need_zone_wp_tracking(clone))
                return ti->type->map(ti, clone);

        /* Lock the target zone */
        zno = bio_zone_no(clone);
        dm_zone_lock(md->disk, zno, clone);

        orig_bio_details.nr_sectors = bio_sectors(clone);
        orig_bio_details.op = bio_op(clone);

        /*
         * Check that the bio and the target zone write pointer offset are
         * both valid, and if the bio is a zone append, remap it to a write.
         */
        if (!dm_zone_map_bio_begin(md, zno, clone)) {
                dm_zone_unlock(md->disk, zno, clone);
                return DM_MAPIO_KILL;
        }

        /* Let the target do its work */
        r = ti->type->map(ti, clone);
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /*
                 * The target submitted the clone BIO. The target zone will
                 * be unlocked on completion of the clone.
                 */
                sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
                                          *tio->len_ptr);
                break;
        case DM_MAPIO_REMAPPED:
                /*
                 * The target only remapped the clone BIO. In case of error,
                 * unlock the target zone here as the clone will not be
                 * submitted.
                 */
                sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
                                          *tio->len_ptr);
                if (sts != BLK_STS_OK)
                        dm_zone_unlock(md->disk, zno, clone);
                break;
        case DM_MAPIO_REQUEUE:
        case DM_MAPIO_KILL:
        default:
                dm_zone_unlock(md->disk, zno, clone);
                sts = BLK_STS_IOERR;
                break;
        }

        if (sts != BLK_STS_OK)
                return DM_MAPIO_KILL;

        return r;
}

/*
 * IO completion callback called from clone_endio().
 */
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
        struct mapped_device *md = io->md;
        struct gendisk *disk = md->disk;
        struct bio *orig_bio = io->orig_bio;
        unsigned int zwp_offset;
        unsigned int zno;

        /*
         * For targets that do not emulate zone append, we only need to
         * handle native zone append bios.
         */
        if (!dm_emulate_zone_append(md)) {
                /*
                 * Get the offset within the zone of the written sector
                 * and add that to the original bio sector position.
                 */
                if (clone->bi_status == BLK_STS_OK &&
                    bio_op(clone) == REQ_OP_ZONE_APPEND) {
                        orig_bio->bi_iter.bi_sector +=
                                bdev_offset_from_zone_start(disk->part0,
                                                clone->bi_iter.bi_sector);
                }

                return;
        }

        /*
         * For targets that do emulate zone append, if the clone BIO does not
         * own the target zone write lock, we have nothing to do.
         */
        if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
                return;

        zno = bio_zone_no(orig_bio);

        if (clone->bi_status != BLK_STS_OK) {
                /*
                 * BIOs that modify a zone write pointer may leave the zone
                 * in an unknown state in case of failure (e.g. the write
                 * pointer was only partially advanced). In this case, set
                 * the target zone write pointer as invalid unless it is
                 * already being updated.
                 */
                WRITE_ONCE(md->zwp_offset[zno], DM_ZONE_INVALID_WP_OFST);
        } else if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
                /*
                 * Get the written sector for zone append operations that
                 * were emulated using regular write operations.
                 */
                zwp_offset = READ_ONCE(md->zwp_offset[zno]);
                if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))
                        WRITE_ONCE(md->zwp_offset[zno],
                                   DM_ZONE_INVALID_WP_OFST);
                else
                        orig_bio->bi_iter.bi_sector +=
                                zwp_offset - bio_sectors(orig_bio);
        }

        dm_zone_unlock(disk, zno, clone);
}