1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * SCSI Zoned Block commands
4 *
5 * Copyright (C) 2014-2015 SUSE Linux GmbH
6 * Written by: Hannes Reinecke <hare@suse.de>
7 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
8 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
9 */
10
11 #include <linux/blkdev.h>
12 #include <linux/vmalloc.h>
13 #include <linux/sched/mm.h>
14 #include <linux/mutex.h>
15
16 #include <asm/unaligned.h>
17
18 #include <scsi/scsi.h>
19 #include <scsi/scsi_cmnd.h>
20
21 #include "sd.h"
22
23 /**
24 * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
25 * @zone: Zone for which to return the write pointer offset.
26 *
27 * Return: offset of the write pointer from the start of the zone.
28 */
sd_zbc_get_zone_wp_offset(struct blk_zone * zone)29 static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
30 {
31 if (zone->type == ZBC_ZONE_TYPE_CONV)
32 return 0;
33
34 switch (zone->cond) {
35 case BLK_ZONE_COND_IMP_OPEN:
36 case BLK_ZONE_COND_EXP_OPEN:
37 case BLK_ZONE_COND_CLOSED:
38 return zone->wp - zone->start;
39 case BLK_ZONE_COND_FULL:
40 return zone->len;
41 case BLK_ZONE_COND_EMPTY:
42 case BLK_ZONE_COND_OFFLINE:
43 case BLK_ZONE_COND_READONLY:
44 default:
45 /*
46 * Offline and read-only zones do not have a valid
47 * write pointer. Use 0 as for an empty zone.
48 */
49 return 0;
50 }
51 }
52
53 /* Whether or not a SCSI zone descriptor describes a gap zone. */
sd_zbc_is_gap_zone(const u8 buf[64])54 static bool sd_zbc_is_gap_zone(const u8 buf[64])
55 {
56 return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
57 }
58
/**
 * sd_zbc_parse_report - Parse a SCSI zone descriptor
 * @sdkp: SCSI disk pointer.
 * @buf: SCSI zone descriptor.
 * @idx: Index of the zone relative to the first zone reported by the current
 *	sd_zbc_report_zones() call.
 * @cb: Callback function pointer.
 * @data: Second argument passed to @cb.
 *
 * Return: Value returned by @cb, or -EINVAL for an invalid descriptor.
 *
 * Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
 * call @cb(blk_zone, @data).
 */
static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
			       unsigned int idx, report_zones_cb cb, void *data)
{
	struct scsi_device *sdp = sdkp->device;
	struct blk_zone zone = { 0 };
	sector_t start_lba, gran;
	int ret;

	/* Gap zones must be filtered out by the caller. */
	if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
		return -EINVAL;

	/* Decode the REPORT ZONES descriptor fields. */
	zone.type = buf[0] & 0x0f;
	zone.cond = (buf[1] >> 4) & 0xf;
	if (buf[1] & 0x01)
		zone.reset = 1;
	if (buf[1] & 0x02)
		zone.non_seq = 1;

	/* Descriptor values are in logical blocks; convert to 512B sectors. */
	start_lba = get_unaligned_be64(&buf[16]);
	zone.start = logical_to_sectors(sdp, start_lba);
	zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone.len = zone.capacity;
	if (sdkp->zone_starting_lba_gran) {
		/*
		 * With a constant zone starting LBA granularity, a zone
		 * capacity exceeding the granularity is invalid.
		 */
		gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
		if (zone.len > gran) {
			sd_printk(KERN_ERR, sdkp,
				  "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
				  start_lba,
				  sectors_to_logical(sdp, zone.capacity),
				  sectors_to_logical(sdp, zone.len),
				  sectors_to_logical(sdp, gran));
			return -EINVAL;
		}
		/*
		 * Use the starting LBA granularity instead of the zone length
		 * obtained from the REPORT ZONES command.
		 */
		zone.len = gran;
	}
	/* Full zones have their write pointer at the zone end. */
	if (zone.cond == ZBC_ZONE_COND_FULL)
		zone.wp = zone.start + zone.len;
	else
		zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));

	ret = cb(&zone, idx, data);
	if (ret)
		return ret;

	/* During zone revalidation, also refresh the new WP offset array. */
	if (sdkp->rev_wp_offset)
		sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);

	return 0;
}
126
/**
 * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: vmalloc-ed buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 * @partial: Do partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible report matching
 * zones and will only report the count of zones fitting in the command reply
 * buffer.
 *
 * Return: 0 on success, -EIO if the command fails or returns a malformed reply.
 */
static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
				  unsigned int buflen, sector_t lba,
				  bool partial)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	/* Build the 16-byte ZBC IN / REPORT ZONES CDB. */
	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	if (partial)
		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES start lba %llu failed\n", lba);
		sd_print_result(sdkp, "REPORT ZONES", result);
		/* A positive result carries SCSI status: sense may be valid. */
		if (result > 0 && scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EIO;
	}

	/* The zone list length must cover at least one 64B zone descriptor. */
	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}
182
/**
 * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
 * @sdkp: The target disk
 * @nr_zones: Maximum number of zones to report
 * @buflen: Size of the buffer allocated
 *
 * Try to allocate a reply buffer for the number of requested zones.
 * The size of the buffer allocated may be smaller than requested to
 * satisfy the device constraint (max_hw_sectors, max_segments, etc).
 *
 * Return the address of the allocated buffer and update @buflen with
 * the size of the allocated buffer.
 */
static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
					unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = sdkp->disk->queue;
	size_t bufsize;
	void *buf;

	/*
	 * Report zone buffer size should be at most 64B times the number of
	 * zones requested plus the 64B reply header, but should be aligned
	 * to SECTOR_SIZE for ATA devices.
	 * Make sure that this size does not exceed the hardware capabilities.
	 * Furthermore, since the report zone command cannot be split, make
	 * sure that the allocated buffer can always be mapped by limiting the
	 * number of pages allocated to the HBA max segments limit.
	 */
	nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	/* Under memory pressure, progressively fall back to smaller buffers. */
	while (bufsize >= SECTOR_SIZE) {
		buf = __vmalloc(bufsize,
				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
	}

	return NULL;
}
230
/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 *
 * Return: The zone size converted from logical blocks to 512B sectors.
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}
239
/**
 * sd_zbc_report_zones - SCSI .report_zones() callback.
 * @disk: Disk to report zones for.
 * @sector: Start sector.
 * @nr_zones: Maximum number of zones to report.
 * @cb: Callback function called to report zone information.
 * @data: Second argument passed to @cb.
 *
 * Called by the block layer to iterate over zone information. See also the
 * disk->fops->report_zones() calls in block/blk-zoned.c.
 *
 * Return: The number of zones reported, or a negative error code.
 */
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	sector_t lba = sectors_to_logical(sdkp->device, sector);
	unsigned int nr, i;
	unsigned char *buf;
	u64 zone_length, start_lba;
	size_t offset, buflen = 0;
	int zone_idx = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return -EOPNOTSUPP;

	if (!sdkp->capacity)
		/* Device gone or invalid */
		return -ENODEV;

	buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
	if (!buf)
		return -ENOMEM;

	while (zone_idx < nr_zones && lba < sdkp->capacity) {
		ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
		if (ret && zone_idx) {
			/*
			 * Zones were already reported but this report failed:
			 * assume the remaining LBAs are not covered by any
			 * zone and shrink the capacity accordingly.
			 */
			sd_printk(KERN_WARNING, sdkp,
				  "ZBC violation: %llu LBAs are not associated with a zone (zone length %llu)\n",
				  sdkp->capacity - lba, zone_length);
			sdkp->capacity = lba;
			set_capacity_and_notify(disk,
					logical_to_sectors(sdkp->device,
							   sdkp->capacity));
			ret = 0;
			break;
		}
		if (ret)
			goto out;

		/* Number of 64B zone descriptors present in this reply. */
		offset = 0;
		nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
		if (!nr)
			break;

		for (i = 0; i < nr && zone_idx < nr_zones; i++) {
			/* Skip the 64B header / advance to the next descriptor. */
			offset += 64;
			start_lba = get_unaligned_be64(&buf[offset + 16]);
			zone_length = get_unaligned_be64(&buf[offset + 8]);
			/*
			 * Sanity checks: the first zone must contain the start
			 * LBA, subsequent zones must be contiguous with the
			 * previous one, and the zone end must not overflow.
			 */
			if ((zone_idx == 0 &&
			    (lba < start_lba ||
			     lba >= start_lba + zone_length)) ||
			    (zone_idx > 0 && start_lba != lba) ||
			    start_lba + zone_length < start_lba) {
				sd_printk(KERN_ERR, sdkp,
					  "Zone %d at LBA %llu is invalid: %llu + %llu\n",
					  zone_idx, lba, start_lba, zone_length);
				ret = -EINVAL;
				goto out;
			}
			lba = start_lba + zone_length;
			if (sd_zbc_is_gap_zone(&buf[offset])) {
				/*
				 * Gap zones are not reported to the block
				 * layer. They are only acceptable on devices
				 * using a constant zone starting LBA
				 * granularity.
				 */
				if (sdkp->zone_starting_lba_gran)
					continue;
				sd_printk(KERN_ERR, sdkp,
					  "Gap zone without constant LBA offsets\n");
				ret = -EINVAL;
				goto out;
			}

			ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
						  cb, data);
			if (ret)
				goto out;

			zone_idx++;
		}
	}

	ret = zone_idx;
out:
	kvfree(buf);
	return ret;
}
335
sd_zbc_cmnd_checks(struct scsi_cmnd * cmd)336 static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
337 {
338 struct request *rq = scsi_cmd_to_rq(cmd);
339 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
340 sector_t sector = blk_rq_pos(rq);
341
342 if (!sd_is_zoned(sdkp))
343 /* Not a zoned device */
344 return BLK_STS_IOERR;
345
346 if (sdkp->device->changed)
347 return BLK_STS_IOERR;
348
349 if (!bdev_is_zone_start(sdkp->disk->part0, sector))
350 /* Unaligned request */
351 return BLK_STS_IOERR;
352
353 return BLK_STS_OK;
354 }
355
/* Sentinel: the cached write pointer offset of a zone is invalid. */
#define SD_ZBC_INVALID_WP_OFST (~0u)
/* Sentinel: a write pointer offset update is in progress for a zone. */
#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1)

/**
 * sd_zbc_update_wp_offset_cb - Refresh one entry of the WP offset cache.
 * @zone: Zone parsed from a REPORT ZONES reply.
 * @idx: Index of the entry to update in the zones_wp_offset array.
 * @data: The scsi_disk pointer.
 *
 * report_zones_cb used by sd_zbc_update_wp_offset_workfn(). Must be called
 * with zones_wp_offset_lock held.
 *
 * Return: Always 0.
 */
static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
				      void *data)
{
	struct scsi_disk *sdkp = data;

	lockdep_assert_held(&sdkp->zones_wp_offset_lock);

	sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);

	return 0;
}
370
/*
 * An attempt to append a zone triggered an invalid write pointer error.
 * Reread the write pointer of the zone(s) in which the append failed.
 */
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
	struct scsi_disk *sdkp;
	unsigned long flags;
	sector_t zno;
	int ret;

	sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
		/* Only refresh the zones marked as being updated. */
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			continue;

		/*
		 * Drop the lock while issuing the REPORT ZONES command. The
		 * entry stays at SD_ZBC_UPDATING_WP_OFST meanwhile, so
		 * concurrent zone appends to this zone keep being requeued
		 * (see sd_zbc_prepare_zone_append()).
		 */
		spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
		ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
					     SD_BUF_SIZE,
					     zno * sdkp->zone_info.zone_blocks, true);
		spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
		/* Skip the 64B reply header to reach the zone descriptor. */
		if (!ret)
			sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
					    zno, sd_zbc_update_wp_offset_cb,
					    sdkp);
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	/* Drop the reference taken in sd_zbc_prepare_zone_append(). */
	scsi_device_put(sdkp->device);
}
403
/**
 * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
 * @cmd: the command to setup
 * @lba: the LBA to patch
 * @nr_blocks: the number of LBAs to be written
 *
 * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
 * @sd_zbc_prepare_zone_append() handles the necessary zone write locking and
 * patching of the lba for an emulated ZONE_APPEND command.
 *
 * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
 * schedule a REPORT ZONES command and return BLK_STS_DEV_RESOURCE so that the
 * command is requeued (BLK_STS_IOERR is returned if the device reference
 * needed by the update work cannot be taken).
 */
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
					unsigned int nr_blocks)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	unsigned int wp_offset, zno = blk_rq_zone_no(rq);
	unsigned long flags;
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	/* Zone append is only valid for sequential write required zones. */
	if (!blk_rq_zone_is_seq(rq))
		return BLK_STS_IOERR;

	/* Unlock of the write lock will happen in sd_zbc_complete() */
	if (!blk_req_zone_write_trylock(rq))
		return BLK_STS_ZONE_RESOURCE;

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	wp_offset = sdkp->zones_wp_offset[zno];
	switch (wp_offset) {
	case SD_ZBC_INVALID_WP_OFST:
		/*
		 * We are about to schedule work to update a zone write pointer
		 * offset, which will cause the zone append command to be
		 * requeued. So make sure that the scsi device does not go away
		 * while the work is being processed.
		 */
		if (scsi_device_get(sdkp->device)) {
			ret = BLK_STS_IOERR;
			break;
		}
		sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
		schedule_work(&sdkp->zone_wp_offset_work);
		fallthrough;
	case SD_ZBC_UPDATING_WP_OFST:
		/* An update is in progress: requeue the command. */
		ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		/* The cache stores 512B sector offsets: convert to blocks. */
		wp_offset = sectors_to_logical(sdkp->device, wp_offset);
		if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
			/* The append would cross the zone end. */
			ret = BLK_STS_IOERR;
			break;
		}

		/* Patch the LBA so the write lands at the write pointer. */
		*lba += wp_offset;
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
	if (ret)
		blk_req_zone_write_unlock(rq);
	return ret;
}
471
472 /**
473 * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
474 * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
475 * @cmd: the command to setup
476 * @op: Operation to be performed
477 * @all: All zones control
478 *
479 * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
480 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
481 */
sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd * cmd,unsigned char op,bool all)482 blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
483 unsigned char op, bool all)
484 {
485 struct request *rq = scsi_cmd_to_rq(cmd);
486 sector_t sector = blk_rq_pos(rq);
487 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
488 sector_t block = sectors_to_logical(sdkp->device, sector);
489 blk_status_t ret;
490
491 ret = sd_zbc_cmnd_checks(cmd);
492 if (ret != BLK_STS_OK)
493 return ret;
494
495 cmd->cmd_len = 16;
496 memset(cmd->cmnd, 0, cmd->cmd_len);
497 cmd->cmnd[0] = ZBC_OUT;
498 cmd->cmnd[1] = op;
499 if (all)
500 cmd->cmnd[14] = 0x1;
501 else
502 put_unaligned_be64(block, &cmd->cmnd[2]);
503
504 rq->timeout = SD_TIMEOUT;
505 cmd->sc_data_direction = DMA_NONE;
506 cmd->transfersize = 0;
507 cmd->allowed = 0;
508
509 return BLK_STS_OK;
510 }
511
sd_zbc_need_zone_wp_update(struct request * rq)512 static bool sd_zbc_need_zone_wp_update(struct request *rq)
513 {
514 switch (req_op(rq)) {
515 case REQ_OP_ZONE_APPEND:
516 case REQ_OP_ZONE_FINISH:
517 case REQ_OP_ZONE_RESET:
518 case REQ_OP_ZONE_RESET_ALL:
519 return true;
520 case REQ_OP_WRITE:
521 case REQ_OP_WRITE_ZEROES:
522 case REQ_OP_WRITE_SAME:
523 return blk_rq_zone_is_seq(rq);
524 default:
525 return false;
526 }
527 }
528
/**
 * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 *
 * Called from sd_zbc_complete() to handle the update of the cached zone write
 * pointer value in case an update is needed.
 *
 * Return: The (possibly adjusted) number of bytes to complete.
 */
static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
					  unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	unsigned int zno = blk_rq_zone_no(rq);
	enum req_opf op = req_op(rq);
	unsigned long flags;

	/*
	 * If we got an error for a command that needs updating the write
	 * pointer offset cache, we must mark the zone wp offset entry as
	 * invalid to force an update from disk the next time a zone append
	 * command is issued.
	 */
	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);

	if (result && op != REQ_OP_ZONE_RESET_ALL) {
		if (op == REQ_OP_ZONE_APPEND) {
			/* Force complete completion (no retry) */
			good_bytes = 0;
			scsi_set_resid(cmd, blk_rq_bytes(rq));
		}

		/*
		 * Force an update of the zone write pointer offset on
		 * the next zone append access.
		 */
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
		goto unlock_wp_offset;
	}

	/* Success: account for the WP movement caused by the operation. */
	switch (op) {
	case REQ_OP_ZONE_APPEND:
		/* Report back the actual written position to the caller. */
		rq->__sector += sdkp->zones_wp_offset[zno];
		fallthrough;
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		/* Advance the cached WP, unless the zone is already full. */
		if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
			sdkp->zones_wp_offset[zno] +=
						good_bytes >> SECTOR_SHIFT;
		break;
	case REQ_OP_ZONE_RESET:
		sdkp->zones_wp_offset[zno] = 0;
		break;
	case REQ_OP_ZONE_FINISH:
		/* Finishing a zone puts the WP at the zone end. */
		sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		memset(sdkp->zones_wp_offset, 0,
		       sdkp->zone_info.nr_zones * sizeof(unsigned int));
		break;
	default:
		break;
	}

unlock_wp_offset:
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	return good_bytes;
}
601
/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done() to handle zone commands errors and updates to the
 * device queue zone write pointer offset cache.
 *
 * Return: The (possibly adjusted) number of bytes to complete.
 */
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
			     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (op_is_zone_mgmt(req_op(rq)) &&
	    result &&
	    sshdr->sense_key == ILLEGAL_REQUEST &&
	    sshdr->asc == 0x24) {
		/*
		 * INVALID FIELD IN CDB error: a zone management command was
		 * attempted on a conventional zone. Nothing to worry about,
		 * so be quiet about the error.
		 */
		rq->rq_flags |= RQF_QUIET;
	} else if (sd_zbc_need_zone_wp_update(rq))
		good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);

	/* Release the zone write lock taken in sd_zbc_prepare_zone_append(). */
	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		blk_req_zone_write_unlock(rq);

	return good_bytes;
}
635
/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6, get information and check that reads are unconstrained.
 *
 * Return: 0 on success, -ENODEV if the page cannot be read or describes an
 * unsupported device.
 */
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
					      unsigned char *buf)
{
	u64 zone_starting_lba_gran;

	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Read zoned characteristics VPD page failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
		return 0;
	}

	/* Host-managed */
	sdkp->urswrz = buf[4] & 1;
	sdkp->zones_optimal_open = 0;
	sdkp->zones_optimal_nonseq = 0;
	sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	/* Check zone alignment method */
	switch (buf[23] & 0xf) {
	case 0:
	case ZBC_CONSTANT_ZONE_LENGTH:
		/* Use zone length */
		break;
	case ZBC_CONSTANT_ZONE_START_OFFSET:
		/*
		 * The granularity must be a non-zero power of two whose
		 * 512B-sector equivalent fits in an unsigned int.
		 */
		zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
		if (zone_starting_lba_gran == 0 ||
		    !is_power_of_2(zone_starting_lba_gran) ||
		    logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
		    UINT_MAX) {
			sd_printk(KERN_ERR, sdkp,
				  "Invalid zone starting LBA granularity %llu\n",
				  zone_starting_lba_gran);
			return -ENODEV;
		}
		sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
		break;
	default:
		sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
		return -ENODEV;
	}

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
			  "constrained reads devices are not supported\n");
		return -ENODEV;
	}

	return 0;
}
706
/**
 * sd_zbc_check_capacity - Check the device capacity
 * @sdkp: Target disk
 * @buf: command buffer
 * @zblocks: zone size in logical blocks
 *
 * Get the device zone size and check that the device capacity as reported
 * by READ CAPACITY matches the max_lba value (plus one) of the report zones
 * command reply for devices with RC_BASIS == 0.
 *
 * Returns 0 upon success or an error code upon failure.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
				 u32 *zblocks)
{
	u64 zone_blocks;
	sector_t max_lba;
	unsigned char *rec;
	int ret;

	/* Do a report zone to get max_lba and the size of the first zone */
	ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
	if (ret)
		return ret;

	if (sdkp->rc_basis == 0) {
		/* The max_lba field is the capacity of this device */
		max_lba = get_unaligned_be64(&buf[8]);
		if (sdkp->capacity != max_lba + 1) {
			if (sdkp->first_scan)
				sd_printk(KERN_WARNING, sdkp,
					"Changing capacity from %llu to max LBA+1 %llu\n",
					(unsigned long long)sdkp->capacity,
					(unsigned long long)max_lba + 1);
			sdkp->capacity = max_lba + 1;
		}
	}

	if (sdkp->zone_starting_lba_gran == 0) {
		/* Get the size of the first reported zone */
		rec = buf + 64;
		zone_blocks = get_unaligned_be64(&rec[8]);
		/* The zone size in 512B sectors must fit in *zblocks (u32). */
		if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
			if (sdkp->first_scan)
				sd_printk(KERN_NOTICE, sdkp,
					  "Zone size too large\n");
			return -EFBIG;
		}
	} else {
		/* Constant starting LBA granularity defines the zone size. */
		zone_blocks = sdkp->zone_starting_lba_gran;
	}

	*zblocks = zone_blocks;

	return 0;
}
763
sd_zbc_print_zones(struct scsi_disk * sdkp)764 static void sd_zbc_print_zones(struct scsi_disk *sdkp)
765 {
766 u64 remainder;
767
768 if (!sd_is_zoned(sdkp) || !sdkp->capacity)
769 return;
770
771 div64_u64_rem(sdkp->capacity, sdkp->zone_info.zone_blocks, &remainder);
772 if (remainder)
773 sd_printk(KERN_NOTICE, sdkp,
774 "%u zones of %u logical blocks + 1 runt zone\n",
775 sdkp->zone_info.nr_zones - 1,
776 sdkp->zone_info.zone_blocks);
777 else
778 sd_printk(KERN_NOTICE, sdkp,
779 "%u zones of %u logical blocks\n",
780 sdkp->zone_info.nr_zones,
781 sdkp->zone_info.zone_blocks);
782 }
783
sd_zbc_init_disk(struct scsi_disk * sdkp)784 static int sd_zbc_init_disk(struct scsi_disk *sdkp)
785 {
786 sdkp->zones_wp_offset = NULL;
787 spin_lock_init(&sdkp->zones_wp_offset_lock);
788 sdkp->rev_wp_offset = NULL;
789 mutex_init(&sdkp->rev_mutex);
790 INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
791 sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
792 if (!sdkp->zone_wp_update_buf)
793 return -ENOMEM;
794
795 return 0;
796 }
797
sd_zbc_clear_zone_info(struct scsi_disk * sdkp)798 static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
799 {
800 /* Serialize against revalidate zones */
801 mutex_lock(&sdkp->rev_mutex);
802
803 kvfree(sdkp->zones_wp_offset);
804 sdkp->zones_wp_offset = NULL;
805 kfree(sdkp->zone_wp_update_buf);
806 sdkp->zone_wp_update_buf = NULL;
807
808 sdkp->early_zone_info = (struct zoned_disk_info){ };
809 sdkp->zone_info = (struct zoned_disk_info){ };
810
811 mutex_unlock(&sdkp->rev_mutex);
812 }
813
/* Release the zone information of a disk, if it is zoned. */
void sd_zbc_release_disk(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp))
		return;

	sd_zbc_clear_zone_info(sdkp);
}
819
/*
 * Callback for blk_revalidate_disk_zones(): install the write pointer offsets
 * gathered in rev_wp_offset during revalidation as the active WP offset cache.
 */
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}
826
/*
 * Call blk_revalidate_disk_zones() if any of the zoned disk properties have
 * changed that make it necessary to call that function. Called by
 * sd_revalidate_disk() after the gendisk capacity has been set.
 */
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
	unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
	u32 max_append;
	int ret = 0;
	unsigned int flags;

	/*
	 * For all zoned disks, initialize zone append emulation data if not
	 * already done. This is necessary also for host-aware disks used as
	 * regular disks due to the presence of partitions as these partitions
	 * may be deleted and the disk zoned model changed back from
	 * BLK_ZONED_NONE to BLK_ZONED_HA.
	 */
	if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
		ret = sd_zbc_init_disk(sdkp);
		if (ret)
			return ret;
	}

	/*
	 * There is nothing to do for regular disks, including host-aware disks
	 * that have partitions.
	 */
	if (!blk_queue_is_zoned(q))
		return 0;

	/*
	 * Make sure revalidate zones are serialized to ensure exclusive
	 * updates of the scsi disk data.
	 */
	mutex_lock(&sdkp->rev_mutex);

	/* Skip the revalidation if nothing changed since the last one. */
	if (sdkp->zone_info.zone_blocks == zone_blocks &&
	    sdkp->zone_info.nr_zones == nr_zones &&
	    disk->queue->nr_zones == nr_zones)
		goto unlock;

	/* Use NOIO allocations to avoid recursing into the I/O path. */
	flags = memalloc_noio_save();
	sdkp->zone_info.zone_blocks = zone_blocks;
	sdkp->zone_info.nr_zones = nr_zones;
	/* Scratch WP offset array, filled by sd_zbc_parse_report(). */
	sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
	if (!sdkp->rev_wp_offset) {
		ret = -ENOMEM;
		memalloc_noio_restore(flags);
		goto unlock;
	}

	ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);

	memalloc_noio_restore(flags);
	kvfree(sdkp->rev_wp_offset);
	sdkp->rev_wp_offset = NULL;

	if (ret) {
		/* Revalidation failed: invalidate the zone info and capacity. */
		sdkp->zone_info = (struct zoned_disk_info){ };
		sdkp->capacity = 0;
		goto unlock;
	}

	/*
	 * Limit zone append payloads to what a single zone can take and to
	 * what the HBA can map and transfer in one command.
	 */
	max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
			   q->limits.max_segments << (PAGE_SHIFT - 9));
	max_append = min_t(u32, max_append, queue_max_hw_sectors(q));

	blk_queue_max_zone_append_sectors(q, max_append);

	sd_zbc_print_zones(sdkp);

unlock:
	mutex_unlock(&sdkp->rev_mutex);

	return ret;
}
908
/**
 * sd_zbc_read_zones - Read zone information and update the request queue
 * @sdkp: SCSI disk pointer.
 * @buf: 512 byte buffer used for storing SCSI command output.
 *
 * Read zone information and update the request queue zone characteristics and
 * also the zoned device information in *sdkp. Called by sd_revalidate_disk()
 * before the gendisk capacity has been set.
 *
 * Return: 0 on success, a negative error code otherwise (the disk capacity is
 * zeroed on failure).
 */
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	unsigned int nr_zones;
	u32 zone_blocks = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* READ16/WRITE16 is mandatory for ZBC disks */
	sdkp->device->use_16_for_rw = 1;
	sdkp->device->use_10_for_rw = 0;

	if (!blk_queue_is_zoned(q)) {
		/*
		 * This can happen for a host aware disk with partitions.
		 * The block device zone information was already cleared
		 * by blk_queue_set_zoned(). Only clear the scsi disk zone
		 * information and exit early.
		 */
		sd_zbc_clear_zone_info(sdkp);
		return 0;
	}

	/* Check zoned block device characteristics (unconstrained reads) */
	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/* Check the device capacity reported by report zones */
	ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
	if (ret != 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	/* A reported max_open of U32_MAX means "no limit". */
	if (sdkp->zones_max_open == U32_MAX)
		blk_queue_max_open_zones(q, 0);
	else
		blk_queue_max_open_zones(q, sdkp->zones_max_open);
	blk_queue_max_active_zones(q, 0);
	/* Round up so that a trailing runt zone is counted. */
	nr_zones = div64_u64(sdkp->capacity + zone_blocks - 1, zone_blocks);

	/*
	 * Per ZBC and ZAC specifications, writes in sequential write required
	 * zones of host-managed devices must be aligned to the device physical
	 * block size.
	 */
	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);

	/* Stashed until sd_zbc_revalidate_zones() commits them. */
	sdkp->early_zone_info.nr_zones = nr_zones;
	sdkp->early_zone_info.zone_blocks = zone_blocks;

	return 0;

err:
	sdkp->capacity = 0;

	return ret;
}
986