// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 8192
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

/* Default parameters for tape drives */
#define TAPE_DEF_DENSITY 0x0
#define TAPE_DEF_BLKSIZE 0

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
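
/*
 * Worked example of the arithmetic above (assuming 64-bit longs):
 * SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit queue;
 * on a 32-bit build it would be 3 * 32 = 96.
 */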

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN		1	/* Data-in command (e.g. READ) */
#define F_D_OUT		2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE	4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN	8
#define F_RL_WLUN_OK	0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA	0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR	0x40	/* for commands like INQUIRY */
#define F_SA_LOW	0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH	0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP	0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW	0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS	0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY	0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY	0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
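
/*
 * Illustrative sketch (not driver code): the two macros above stash and
 * retrieve per-command state via scsi_cmnd->host_scribble, e.g.:
 *
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 *	ASSIGN_QUEUED_CMD(scmd, NULL);	// detach from the scsi_cmnd
 */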

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	/* For tapes */
	unsigned int tape_blksize;
	unsigned int tape_density;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_device_access_info {
	bool atomic_write;
	u64 lba;
	u32 num;
	struct scsi_cmnd *self;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
	struct sdebug_device_access_info *i;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
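
/*
 * Minimal sketch (an assumption: the driver's real CDB check lives in the
 * command entry path, outside this excerpt): byte k of an incoming cdb may
 * only have bits set that are also set in len_mask[k]; len_mask[0] holds
 * the expected cdb length.
 */
static inline bool sdeb_example_cdb_ok(const u8 *cdb,
				       const struct opcode_info_t *oip)
{
	int k, cdb_len = oip->len_mask[0];

	for (k = 1; k < cdb_len && k < 16; ++k) {
		if (cdb[k] & ~oip->len_mask[k])
			return false;	/* reserved or unsupported bit set */
	}
	return true;
}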

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_LAST_ELEM_P1 = 33,	/* keep this last (previous + 1) */
};

static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
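
/*
 * Illustrative sketch of the two-step dispatch this table enables: the
 * first cdb byte indexes opcode_ind_arr[] to yield an SDEB_I_* value,
 * which in turn indexes opcode_info_arr[] (defined further below).
 */
static inline int sdeb_example_opcode_index(u8 opcode)
{
	return opcode_ind_arr[opcode];	/* 0 ==> SDEB_I_INVALID_OPCODE */
}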

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
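
/*
 * Sketch of that convention (an assumption: the flag is consumed in the
 * command scheduling path, outside this excerpt):
 *
 *	if (immed_bit_set)
 *		return res | SDEG_RES_IMMED_MASK;  // ask for fast completion
 *	return res;
 */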

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};

/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,	/* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,	/* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO,	/* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr,	/* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },	/* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr,	/* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL,	/* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, NULL, NULL,	/* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },	/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr,	/* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr,	/* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 31 */
	{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL,	/* ATOMIC WRITE 16 */
		{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,	/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
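
/*
 * Illustrative sketch (an assumption: the real matching is done in the
 * command entry path, outside this excerpt): locate the opcode_info_t
 * entry for an opcode and, for F_SA_LOW/F_SA_HIGH opcodes, the requested
 * service action. Entries with num_attached > 0 chain to one of the
 * overflow arrays above via ->arrp.
 */
static inline const struct opcode_info_t *
sdeb_example_find_opcode_info(u8 opcode, u16 sa)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[opcode]];
	int k;

	if (!(oip->flags & FF_SA) || oip->sa == sa)
		return oip;
	for (k = 0; k < oip->num_attached; ++k) {
		const struct opcode_info_t *r_oip = &oip->arrp[k];

		if (r_oip->opcode == opcode && r_oip->sa == sa)
			return r_oip;
	}
	return NULL;	/* opcode/service action pair not supported */
}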

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;	/* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS;	/* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE;	/* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* iouring iopoll interface.*/

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				   err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				   err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				   err->type, err->cnt, err->cmd,
				   err->host_byte, err->driver_byte,
				   err->status_byte, err->sense_key,
				   err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
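
/*
 * Usage sketch for the error file above (the per-device debugfs path is an
 * assumption based on sdebug_debugfs_root): inject one timeout
 * (ERR_TMOUT_CMD == 0) for the next INQUIRY (opcode 0x12), then remove it:
 *
 *	echo "0 1 0x12" > /sys/kernel/debug/scsi_debug/<dev>/error
 *	echo "- 0 0x12" > /sys/kernel/debug/scsi_debug/<dev>/error
 */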

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_target_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
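
/*
 * Worked example of the wrap-around above: with an 8 MiB store (the
 * DEF_DEV_SIZE_MB default) and 512-byte sectors, sdebug_store_sectors is
 * 16384, so lba 20000 maps to 20000 % 16384 == 3616 and the returned
 * pointer is storep + 3616 * 512.
 */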

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
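
/*
 * Worked example of the SKS field built above: for an invalid bit 1 in
 * cdb byte 4 (c_d = SDEB_IN_CDB, in_byte = 4, in_bit = 1) the three SKS
 * bytes become 0xc9 0x00 0x04, i.e. 0x80 (SKSV) | 0x40 (C/D) |
 * 0x08 (BPV) | 0x01 (bit pointer), followed by the big-endian field
 * pointer 0x0004. They land in bytes 15..17 of fixed-format sense, or in
 * a type 0x02 descriptor when descriptor sense (dsense) is enabled.
 */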

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1518
1519 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1520 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1521 int arr_len)
1522 {
1523 int act_len;
1524 struct scsi_data_buffer *sdb = &scp->sdb;
1525
1526 if (!sdb->length)
1527 return 0;
1528 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1529 return DID_ERROR << 16;
1530
1531 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1532 arr, arr_len);
1533 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1534
1535 return 0;
1536 }
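/*
 * Usage sketch (illustrative only; resp_fixed_pg() and its 4-byte payload
 * are invented for this comment): a typical resp_*() handler builds its
 * response in a local array and hands it to fill_from_dev_buffer(), which
 * copies up to arr_len bytes into the command's scatter-gather list and
 * sets the residual count:
 *
 *	static int resp_fixed_pg(struct scsi_cmnd *scp,
 *				 struct sdebug_dev_info *devip)
 *	{
 *		unsigned char arr[4] = {0x0, 0x0, 0x0, 0x1};
 *
 *		return fill_from_dev_buffer(scp, arr, sizeof(arr));
 *	}
 */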
1537
1538 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1539 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1540 * calls, not required to write in ascending offset order. Assumes resid
1541 * set to scsi_bufflen() prior to any calls.
1542 */
1543 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1544 int arr_len, unsigned int off_dst)
1545 {
1546 unsigned int act_len, n;
1547 struct scsi_data_buffer *sdb = &scp->sdb;
1548 off_t skip = off_dst;
1549
1550 if (sdb->length <= off_dst)
1551 return 0;
1552 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1553 return DID_ERROR << 16;
1554
1555 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1556 arr, arr_len, skip);
1557 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1558 __func__, off_dst, scsi_bufflen(scp), act_len,
1559 scsi_get_resid(scp));
1560 n = scsi_bufflen(scp) - (off_dst + act_len);
1561 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1562 return 0;
1563 }
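/*
 * Worked example of the resid arithmetic above (buffer sizes are
 * hypothetical): with scsi_bufflen() == 512 and resid preset to 512, a
 * call with off_dst=100, arr_len=50 yields n = 512 - 150 = 362 and
 * resid = min(512, 362) = 362. A later call with off_dst=0, arr_len=100
 * yields n = 412 and leaves resid at min(362, 412) = 362. So resid always
 * tracks the highest byte offset written, whatever the call order.
 */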
1564
1565 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1566 * 'arr' or -1 if error.
1567 */
1568 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1569 int arr_len)
1570 {
1571 if (!scsi_bufflen(scp))
1572 return 0;
1573 if (scp->sc_data_direction != DMA_TO_DEVICE)
1574 return -1;
1575
1576 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1577 }
1578
1579
1580 static char sdebug_inq_vendor_id[9] = "Linux ";
1581 static char sdebug_inq_product_id[17] = "scsi_debug ";
1582 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1583 /* Use some locally assigned NAAs for SAS addresses. */
1584 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1585 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1586 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1587
1588 /* Device identification VPD page. Returns number of bytes placed in arr */
1589 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1590 int target_dev_id, int dev_id_num,
1591 const char *dev_id_str, int dev_id_str_len,
1592 const uuid_t *lu_name)
1593 {
1594 int num, port_a;
1595 char b[32];
1596
1597 port_a = target_dev_id + 1;
1598 /* T10 vendor identifier field format (faked) */
1599 arr[0] = 0x2; /* ASCII */
1600 arr[1] = 0x1;
1601 arr[2] = 0x0;
1602 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1603 memcpy(&arr[12], sdebug_inq_product_id, 16);
1604 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1605 num = 8 + 16 + dev_id_str_len;
1606 arr[3] = num;
1607 num += 4;
1608 if (dev_id_num >= 0) {
1609 if (sdebug_uuid_ctl) {
1610 /* Locally assigned UUID */
1611 arr[num++] = 0x1; /* binary (not necessarily sas) */
1612 arr[num++] = 0xa; /* PIV=0, lu, naa */
1613 arr[num++] = 0x0;
1614 arr[num++] = 0x12;
1615 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1616 arr[num++] = 0x0;
1617 memcpy(arr + num, lu_name, 16);
1618 num += 16;
1619 } else {
1620 /* NAA-3, Logical unit identifier (binary) */
1621 arr[num++] = 0x1; /* binary (not necessarily sas) */
1622 arr[num++] = 0x3; /* PIV=0, lu, naa */
1623 arr[num++] = 0x0;
1624 arr[num++] = 0x8;
1625 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1626 num += 8;
1627 }
1628 /* Target relative port number */
1629 arr[num++] = 0x61; /* proto=sas, binary */
1630 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1631 arr[num++] = 0x0; /* reserved */
1632 arr[num++] = 0x4; /* length */
1633 arr[num++] = 0x0; /* reserved */
1634 arr[num++] = 0x0; /* reserved */
1635 arr[num++] = 0x0;
1636 arr[num++] = 0x1; /* relative port A */
1637 }
1638 /* NAA-3, Target port identifier */
1639 arr[num++] = 0x61; /* proto=sas, binary */
1640 arr[num++] = 0x93; /* piv=1, target port, naa */
1641 arr[num++] = 0x0;
1642 arr[num++] = 0x8;
1643 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1644 num += 8;
1645 /* NAA-3, Target port group identifier */
1646 arr[num++] = 0x61; /* proto=sas, binary */
1647 arr[num++] = 0x95; /* piv=1, target port group id */
1648 arr[num++] = 0x0;
1649 arr[num++] = 0x4;
1650 arr[num++] = 0;
1651 arr[num++] = 0;
1652 put_unaligned_be16(port_group_id, arr + num);
1653 num += 2;
1654 /* NAA-3, Target device identifier */
1655 arr[num++] = 0x61; /* proto=sas, binary */
1656 arr[num++] = 0xa3; /* piv=1, target device, naa */
1657 arr[num++] = 0x0;
1658 arr[num++] = 0x8;
1659 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1660 num += 8;
1661 /* SCSI name string: Target device identifier */
1662 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1663 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1664 arr[num++] = 0x0;
1665 arr[num++] = 24;
1666 memcpy(arr + num, "naa.32222220", 12);
1667 num += 12;
1668 snprintf(b, sizeof(b), "%08X", target_dev_id);
1669 memcpy(arr + num, b, 8);
1670 num += 8;
1671 memset(arr + num, 0, 4);
1672 num += 4;
1673 return num;
1674 }
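/*
 * Each designation descriptor built above starts with the common 4-byte
 * SPC header (byte 0: protocol identifier and code set, byte 1: PIV,
 * association and designator type, byte 2: reserved, byte 3: designator
 * length). As a concrete example, the target port group descriptor is
 * emitted as:
 *
 *	0x61 0x95 0x00 0x04  0x00 0x00 <port_group_id, big-endian>
 */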
1675
1676 static unsigned char vpd84_data[] = {
1677 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1678 0x22,0x22,0x22,0x0,0xbb,0x1,
1679 0x22,0x22,0x22,0x0,0xbb,0x2,
1680 };
1681
1682 /* Software interface identification VPD page */
1683 static int inquiry_vpd_84(unsigned char *arr)
1684 {
1685 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1686 return sizeof(vpd84_data);
1687 }
1688
1689 /* Management network addresses VPD page */
1690 static int inquiry_vpd_85(unsigned char *arr)
1691 {
1692 int num = 0;
1693 const char *na1 = "https://www.kernel.org/config";
1694 const char *na2 = "http://www.kernel.org/log";
1695 int plen, olen;
1696
1697 arr[num++] = 0x1; /* lu, storage config */
1698 arr[num++] = 0x0; /* reserved */
1699 arr[num++] = 0x0;
1700 olen = strlen(na1);
1701 plen = olen + 1;
1702 if (plen % 4)
1703 plen = ((plen / 4) + 1) * 4;
1704 arr[num++] = plen; /* length, null terminated, padded */
1705 memcpy(arr + num, na1, olen);
1706 memset(arr + num + olen, 0, plen - olen);
1707 num += plen;
1708
1709 arr[num++] = 0x4; /* lu, logging */
1710 arr[num++] = 0x0; /* reserved */
1711 arr[num++] = 0x0;
1712 olen = strlen(na2);
1713 plen = olen + 1;
1714 if (plen % 4)
1715 plen = ((plen / 4) + 1) * 4;
1716 arr[num++] = plen; /* length, null terminated, padded */
1717 memcpy(arr + num, na2, olen);
1718 memset(arr + num + olen, 0, plen - olen);
1719 num += plen;
1720
1721 return num;
1722 }
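/*
 * The padding arithmetic above rounds plen up to a multiple of 4, as SPC
 * requires for network address fields. E.g. na1 is 29 characters, so
 * olen = 29 and plen = 30 after the NUL; since 30 % 4 != 0,
 * plen = (30/4 + 1) * 4 = 32. The same result could be written as
 * round_up(olen + 1, 4).
 */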
1723
1724 /* SCSI ports VPD page */
1725 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1726 {
1727 int num = 0;
1728 int port_a, port_b;
1729
1730 port_a = target_dev_id + 1;
1731 port_b = port_a + 1;
1732 arr[num++] = 0x0; /* reserved */
1733 arr[num++] = 0x0; /* reserved */
1734 arr[num++] = 0x0;
1735 arr[num++] = 0x1; /* relative port 1 (primary) */
1736 memset(arr + num, 0, 6);
1737 num += 6;
1738 arr[num++] = 0x0;
1739 arr[num++] = 12; /* length tp descriptor */
1740 /* naa-5 target port identifier (A) */
1741 arr[num++] = 0x61; /* proto=sas, binary */
1742 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1743 arr[num++] = 0x0; /* reserved */
1744 arr[num++] = 0x8; /* length */
1745 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1746 num += 8;
1747 arr[num++] = 0x0; /* reserved */
1748 arr[num++] = 0x0; /* reserved */
1749 arr[num++] = 0x0;
1750 arr[num++] = 0x2; /* relative port 2 (secondary) */
1751 memset(arr + num, 0, 6);
1752 num += 6;
1753 arr[num++] = 0x0;
1754 arr[num++] = 12; /* length tp descriptor */
1755 /* naa-5 target port identifier (B) */
1756 arr[num++] = 0x61; /* proto=sas, binary */
1757 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1758 arr[num++] = 0x0; /* reserved */
1759 arr[num++] = 0x8; /* length */
1760 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1761 num += 8;
1762
1763 return num;
1764 }
1765
1766
1767 static unsigned char vpd89_data[] = {
1768 /* from 4th byte */ 0,0,0,0,
1769 'l','i','n','u','x',' ',' ',' ',
1770 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1771 '1','2','3','4',
1772 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1773 0xec,0,0,0,
1774 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1775 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1776 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1777 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1778 0x53,0x41,
1779 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1780 0x20,0x20,
1781 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1782 0x10,0x80,
1783 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1784 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1785 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1786 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1787 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1788 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1789 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1790 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1791 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1792 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1793 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1794 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1795 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1796 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1797 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1798 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1799 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1800 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1801 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1802 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1803 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1804 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1805 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1806 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1807 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1808 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1809 };
1810
1811 /* ATA Information VPD page */
1812 static int inquiry_vpd_89(unsigned char *arr)
1813 {
1814 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1815 return sizeof(vpd89_data);
1816 }
1817
1818
1819 static unsigned char vpdb0_data[] = {
1820 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1821 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1822 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1823 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1824 };
1825
1826 /* Block limits VPD page (SBC-3) */
1827 static int inquiry_vpd_b0(unsigned char *arr)
1828 {
1829 unsigned int gran;
1830
1831 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1832
1833 /* Optimal transfer length granularity */
1834 if (sdebug_opt_xferlen_exp != 0 &&
1835 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1836 gran = 1 << sdebug_opt_xferlen_exp;
1837 else
1838 gran = 1 << sdebug_physblk_exp;
1839 put_unaligned_be16(gran, arr + 2);
1840
1841 /* Maximum Transfer Length */
1842 if (sdebug_store_sectors > 0x400)
1843 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1844
1845 /* Optimal Transfer Length */
1846 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1847
1848 if (sdebug_lbpu) {
1849 /* Maximum Unmap LBA Count */
1850 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1851
1852 /* Maximum Unmap Block Descriptor Count */
1853 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1854 }
1855
1856 /* Unmap Granularity Alignment */
1857 if (sdebug_unmap_alignment) {
1858 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1859 arr[28] |= 0x80; /* UGAVALID */
1860 }
1861
1862 /* Optimal Unmap Granularity */
1863 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1864
1865 /* Maximum WRITE SAME Length */
1866 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1867
1868 if (sdebug_atomic_wr) {
1869 put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1870 put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1871 put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1872 put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1873 put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1874 }
1875
1876 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1877 }
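/*
 * Layout summary for the fields written above (offsets are into the
 * buffer filled here, which the caller places at byte 4 of the full VPD
 * page):
 *
 *	arr[2..3]   (be16)   optimal transfer length granularity
 *	arr[4..7]   (be32)   maximum transfer length (store sectors)
 *	arr[8..11]  (be32)   optimal transfer length (opt_blks)
 *	arr[16..23] (2xbe32) max unmap LBA count/descriptors (LBPU only)
 *	arr[24..27] (be32)   optimal unmap granularity
 *	arr[28..31] (be32)   unmap granularity alignment, top bit UGAVALID
 *	arr[32..39] (be64)   maximum WRITE SAME length
 *	arr[40..59]          atomic write limits (atomic_wr only)
 */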
1878
1879 /* Block device characteristics VPD page (SBC-3) */
1880 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1881 {
1882 memset(arr, 0, 0x3c);
1883 arr[0] = 0;
1884 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1885 arr[2] = 0;
1886 arr[3] = 5; /* less than 1.8" */
1887
1888 return 0x3c;
1889 }
1890
1891 /* Logical block provisioning VPD page (SBC-4) */
1892 static int inquiry_vpd_b2(unsigned char *arr)
1893 {
1894 memset(arr, 0, 0x4);
1895 arr[0] = 0; /* threshold exponent */
1896 if (sdebug_lbpu)
1897 arr[1] = 1 << 7;
1898 if (sdebug_lbpws)
1899 arr[1] |= 1 << 6;
1900 if (sdebug_lbpws10)
1901 arr[1] |= 1 << 5;
1902 if (sdebug_lbprz && scsi_debug_lbp())
1903 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1904 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1905 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1906 /* threshold_percentage=0 */
1907 return 0x4;
1908 }
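/*
 * Bit layout of byte 1 of the page built above:
 *
 *	bit 7     LBPU    (UNMAP supported)
 *	bit 6     LBPWS   (WRITE SAME(16) with UNMAP supported)
 *	bit 5     LBPWS10 (WRITE SAME(10) with UNMAP supported)
 *	bits 4..2 LBPRZ   (read-after-unmap behaviour, sbc4r07 onward)
 */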
1909
1910 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1911 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1912 {
1913 memset(arr, 0, 0x3c);
1914 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1915 /*
1916 * Set Optimal number of open sequential write preferred zones and
1917 * Optimal number of non-sequentially written sequential write
1918 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1919 * fields set to zero, apart from Max. number of open swrz_s field.
1920 */
1921 put_unaligned_be32(0xffffffff, &arr[4]);
1922 put_unaligned_be32(0xffffffff, &arr[8]);
1923 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1924 put_unaligned_be32(devip->max_open, &arr[12]);
1925 else
1926 put_unaligned_be32(0xffffffff, &arr[12]);
1927 if (devip->zcap < devip->zsize) {
1928 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1929 put_unaligned_be64(devip->zsize, &arr[20]);
1930 } else {
1931 arr[19] = 0;
1932 }
1933 return 0x3c;
1934 }
1935
1936 #define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
1937
1938 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1939
1940 /* Block limits extension VPD page (SBC-4) */
1941 static int inquiry_vpd_b7(unsigned char *arrb4)
1942 {
1943 memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1944 arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1945 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1946 return SDEBUG_BLE_LEN_AFTER_B4;
1947 }
1948
1949 #define SDEBUG_LONG_INQ_SZ 96
1950 #define SDEBUG_MAX_INQ_ARR_SZ 584
1951
1952 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1953 {
1954 unsigned char pq_pdt;
1955 unsigned char *arr;
1956 unsigned char *cmd = scp->cmnd;
1957 u32 alloc_len, n;
1958 int ret;
1959 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1960
1961 alloc_len = get_unaligned_be16(cmd + 3);
1962 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1963 if (!arr)
1964 return DID_REQUEUE << 16;
1965 is_disk = (sdebug_ptype == TYPE_DISK);
1966 is_zbc = devip->zoned;
1967 is_disk_zbc = (is_disk || is_zbc);
1968 have_wlun = scsi_is_wlun(scp->device->lun);
1969 if (have_wlun)
1970 pq_pdt = TYPE_WLUN; /* present, wlun */
1971 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1972 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1973 else
1974 pq_pdt = (sdebug_ptype & 0x1f);
1975 arr[0] = pq_pdt;
1976 if (0x2 & cmd[1]) { /* CMDDT bit set */
1977 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1978 kfree(arr);
1979 return check_condition_result;
1980 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1981 int lu_id_num, port_group_id, target_dev_id;
1982 u32 len;
1983 char lu_id_str[6];
1984 int host_no = devip->sdbg_host->shost->host_no;
1985
1986 arr[1] = cmd[2];
1987 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1988 (devip->channel & 0x7f);
1989 if (sdebug_vpd_use_hostno == 0)
1990 host_no = 0;
1991 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1992 (devip->target * 1000) + devip->lun);
1993 target_dev_id = ((host_no + 1) * 2000) +
1994 (devip->target * 1000) - 3;
1995 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1996 if (0 == cmd[2]) { /* supported vital product data pages */
1997 n = 4;
1998 arr[n++] = 0x0; /* this page */
1999 arr[n++] = 0x80; /* unit serial number */
2000 arr[n++] = 0x83; /* device identification */
2001 arr[n++] = 0x84; /* software interface ident. */
2002 arr[n++] = 0x85; /* management network addresses */
2003 arr[n++] = 0x86; /* extended inquiry */
2004 arr[n++] = 0x87; /* mode page policy */
2005 arr[n++] = 0x88; /* SCSI ports */
2006 if (is_disk_zbc) { /* SBC or ZBC */
2007 arr[n++] = 0x89; /* ATA information */
2008 arr[n++] = 0xb0; /* Block limits */
2009 arr[n++] = 0xb1; /* Block characteristics */
2010 if (is_disk)
2011 arr[n++] = 0xb2; /* LB Provisioning */
2012 if (is_zbc)
2013 arr[n++] = 0xb6; /* ZB dev. char. */
2014 arr[n++] = 0xb7; /* Block limits extension */
2015 }
2016 arr[3] = n - 4; /* number of supported VPD pages */
2017 } else if (0x80 == cmd[2]) { /* unit serial number */
2018 arr[3] = len;
2019 memcpy(&arr[4], lu_id_str, len);
2020 } else if (0x83 == cmd[2]) { /* device identification */
2021 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2022 target_dev_id, lu_id_num,
2023 lu_id_str, len,
2024 &devip->lu_name);
2025 } else if (0x84 == cmd[2]) { /* Software interface ident. */
2026 arr[3] = inquiry_vpd_84(&arr[4]);
2027 } else if (0x85 == cmd[2]) { /* Management network addresses */
2028 arr[3] = inquiry_vpd_85(&arr[4]);
2029 } else if (0x86 == cmd[2]) { /* extended inquiry */
2030 arr[3] = 0x3c; /* number of following entries */
2031 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2032 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
2033 else if (have_dif_prot)
2034 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
2035 else
2036 arr[4] = 0x0; /* no protection stuff */
2037 /*
2038 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2039 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2040 */
2041 arr[5] = 0x17;
2042 } else if (0x87 == cmd[2]) { /* mode page policy */
2043 arr[3] = 0x8; /* number of following entries */
2044 arr[4] = 0x2; /* disconnect-reconnect mp */
2045 arr[6] = 0x80; /* mlus, shared */
2046 arr[8] = 0x18; /* protocol specific lu */
2047 arr[10] = 0x82; /* mlus, per initiator port */
2048 } else if (0x88 == cmd[2]) { /* SCSI Ports */
2049 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2050 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2051 n = inquiry_vpd_89(&arr[4]);
2052 put_unaligned_be16(n, arr + 2);
2053 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2054 arr[3] = inquiry_vpd_b0(&arr[4]);
2055 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2056 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2057 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2058 arr[3] = inquiry_vpd_b2(&arr[4]);
2059 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2060 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2061 } else if (cmd[2] == 0xb7) { /* block limits extension page */
2062 arr[3] = inquiry_vpd_b7(&arr[4]);
2063 } else {
2064 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2065 kfree(arr);
2066 return check_condition_result;
2067 }
2068 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2069 ret = fill_from_dev_buffer(scp, arr,
2070 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2071 kfree(arr);
2072 return ret;
2073 }
2074 /* drops through here for a standard inquiry */
2075 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2076 arr[2] = sdebug_scsi_level;
2077 arr[3] = 2; /* response_data_format==2 */
2078 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2079 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2080 if (sdebug_vpd_use_hostno == 0)
2081 arr[5] |= 0x10; /* claim: implicit TPGS */
2082 arr[6] = 0x10; /* claim: MultiP */
2083 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2084 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2085 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2086 memcpy(&arr[16], sdebug_inq_product_id, 16);
2087 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2088 /* Use Vendor Specific area to place driver date in ASCII */
2089 memcpy(&arr[36], sdebug_version_date, 8);
2090 /* version descriptors (2 bytes each) follow */
2091 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2092 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2093 n = 62;
2094 if (is_disk) { /* SBC-4 no version claimed */
2095 put_unaligned_be16(0x600, arr + n);
2096 n += 2;
2097 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2098 put_unaligned_be16(0x525, arr + n);
2099 n += 2;
2100 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2101 put_unaligned_be16(0x624, arr + n);
2102 n += 2;
2103 }
2104 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2105 ret = fill_from_dev_buffer(scp, arr,
2106 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2107 kfree(arr);
2108 return ret;
2109 }
2110
2111 /* See resp_iec_m_pg() for how this data is manipulated */
2112 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2113 0, 0, 0x0, 0x0};
2114
2115 static int resp_requests(struct scsi_cmnd *scp,
2116 struct sdebug_dev_info *devip)
2117 {
2118 unsigned char *cmd = scp->cmnd;
2119 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2120 bool dsense = !!(cmd[1] & 1);
2121 u32 alloc_len = cmd[4];
2122 u32 len = 18;
2123 int stopped_state = atomic_read(&devip->stopped);
2124
2125 memset(arr, 0, sizeof(arr));
2126 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2127 if (dsense) {
2128 arr[0] = 0x72;
2129 arr[1] = NOT_READY;
2130 arr[2] = LOGICAL_UNIT_NOT_READY;
2131 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2132 len = 8;
2133 } else {
2134 arr[0] = 0x70;
2135 arr[2] = NOT_READY; /* NOT_READY in sense_key */
2136 arr[7] = 0xa; /* 18 byte sense buffer */
2137 arr[12] = LOGICAL_UNIT_NOT_READY;
2138 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2139 }
2140 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2141 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2142 if (dsense) {
2143 arr[0] = 0x72;
2144 arr[1] = 0x0; /* NO_SENSE in sense_key */
2145 arr[2] = THRESHOLD_EXCEEDED;
2146 arr[3] = 0xff; /* Failure prediction(false) */
2147 len = 8;
2148 } else {
2149 arr[0] = 0x70;
2150 arr[2] = 0x0; /* NO_SENSE in sense_key */
2151 arr[7] = 0xa; /* 18 byte sense buffer */
2152 arr[12] = THRESHOLD_EXCEEDED;
2153 arr[13] = 0xff; /* Failure prediction(false) */
2154 }
2155 } else { /* nothing to report */
2156 if (dsense) {
2157 len = 8;
2158 memset(arr, 0, len);
2159 arr[0] = 0x72;
2160 } else {
2161 memset(arr, 0, len);
2162 arr[0] = 0x70;
2163 arr[7] = 0xa;
2164 }
2165 }
2166 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2167 }
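/*
 * Example of the two layouts produced above, using the "threshold
 * exceeded" case:
 *
 *	descriptor (dsense=1): 72 00 5d ff 00 00 00 00
 *	fixed (dsense=0):      70 00 00 00 00 00 00 0a
 *	                       00 00 00 00 5d ff 00 00 00 00
 *
 * i.e. descriptor format carries sense key/ASC/ASCQ in bytes 1..3, fixed
 * format in bytes 2, 12 and 13.
 */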
2168
2169 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2170 {
2171 unsigned char *cmd = scp->cmnd;
2172 int power_cond, want_stop, stopped_state;
2173 bool changing;
2174
2175 power_cond = (cmd[4] & 0xf0) >> 4;
2176 if (power_cond) {
2177 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2178 return check_condition_result;
2179 }
2180 want_stop = !(cmd[4] & 1);
2181 stopped_state = atomic_read(&devip->stopped);
2182 if (stopped_state == 2) {
2183 ktime_t now_ts = ktime_get_boottime();
2184
2185 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2186 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2187
2188 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2189 /* tur_ms_to_ready timer extinguished */
2190 atomic_set(&devip->stopped, 0);
2191 stopped_state = 0;
2192 }
2193 }
2194 if (stopped_state == 2) {
2195 if (want_stop) {
2196 stopped_state = 1; /* dummy up success */
2197 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2198 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2199 return check_condition_result;
2200 }
2201 }
2202 }
2203 changing = (stopped_state != want_stop);
2204 if (changing)
2205 atomic_xchg(&devip->stopped, want_stop);
2206 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2207 return SDEG_RES_IMMED_MASK;
2208 else
2209 return 0;
2210 }
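/*
 * Example timeline for the delayed-ready handling above, assuming the
 * tur_ms_to_ready module parameter is 2000: a device created at t=0
 * starts with stopped == 2. A START STOP UNIT with START=1 at t=1s is
 * rejected with ILLEGAL REQUEST (the delay may not be overridden); the
 * same command at t=3s finds diff_ns >= 2e9 ns, clears the stopped state
 * and completes successfully.
 */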
2211
2212 static sector_t get_sdebug_capacity(void)
2213 {
2214 static const unsigned int gibibyte = 1073741824;
2215
2216 if (sdebug_virtual_gb > 0)
2217 return (sector_t)sdebug_virtual_gb *
2218 (gibibyte / sdebug_sector_size);
2219 else
2220 return sdebug_store_sectors;
2221 }
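/*
 * E.g. virtual_gb=4 with the default 512-byte sector size reports
 * 4 * (1073741824 / 512) = 8388608 sectors, regardless of the actual
 * size of the backing store (accesses beyond the store wrap, see
 * do_device_access()).
 */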
2222
2223 #define SDEBUG_READCAP_ARR_SZ 8
2224 static int resp_readcap(struct scsi_cmnd *scp,
2225 struct sdebug_dev_info *devip)
2226 {
2227 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2228 unsigned int capac;
2229
2230 /* following just in case virtual_gb changed */
2231 sdebug_capacity = get_sdebug_capacity();
2232 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2233 if (sdebug_capacity < 0xffffffff) {
2234 capac = (unsigned int)sdebug_capacity - 1;
2235 put_unaligned_be32(capac, arr + 0);
2236 } else
2237 put_unaligned_be32(0xffffffff, arr + 0);
2238 put_unaligned_be16(sdebug_sector_size, arr + 6);
2239 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2240 }
2241
2242 #define SDEBUG_READCAP16_ARR_SZ 32
2243 static int resp_readcap16(struct scsi_cmnd *scp,
2244 struct sdebug_dev_info *devip)
2245 {
2246 unsigned char *cmd = scp->cmnd;
2247 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2248 u32 alloc_len;
2249
2250 alloc_len = get_unaligned_be32(cmd + 10);
2251 /* following just in case virtual_gb changed */
2252 sdebug_capacity = get_sdebug_capacity();
2253 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2254 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2255 put_unaligned_be32(sdebug_sector_size, arr + 8);
2256 arr[13] = sdebug_physblk_exp & 0xf;
2257 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2258
2259 if (scsi_debug_lbp()) {
2260 arr[14] |= 0x80; /* LBPME */
2261 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2262 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2263 * in the wider field maps to 0 in this field.
2264 */
2265 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2266 arr[14] |= 0x40;
2267 }
2268
2269 /*
2270 * Since the scsi_debug READ CAPACITY implementation always reports the
2271 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2272 */
2273 if (devip->zoned)
2274 arr[12] |= 1 << 4;
2275
2276 arr[15] = sdebug_lowest_aligned & 0xff;
2277
2278 if (have_dif_prot) {
2279 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2280 arr[12] |= 1; /* PROT_EN */
2281 }
2282
2283 return fill_from_dev_buffer(scp, arr,
2284 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2285 }
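/*
 * Mapping of the 3-bit lbprz module parameter to the single LBPRZ bit
 * reported above (per sbc4r07):
 *
 *	lbprz=0 -> 0 (unmapped blocks read as vendor specific)
 *	lbprz=1 -> 1 (unmapped blocks read as zeros)
 *	lbprz=2 -> 0 (initialization pattern; only representable in the
 *	              3-bit field of the LB Provisioning VPD page)
 */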
2286
2287 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2288
2289 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2290 struct sdebug_dev_info *devip)
2291 {
2292 unsigned char *cmd = scp->cmnd;
2293 unsigned char *arr;
2294 int host_no = devip->sdbg_host->shost->host_no;
2295 int port_group_a, port_group_b, port_a, port_b;
2296 u32 alen, n, rlen;
2297 int ret;
2298
2299 alen = get_unaligned_be32(cmd + 6);
2300 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2301 if (!arr)
2302 return DID_REQUEUE << 16;
2303 /*
2304 * EVPD page 0x88 states we have two ports, one
2305 * real and a fake port with no device connected.
2306 * So we create two port groups with one port each
2307 * and set the group with port B to unavailable.
2308 */
2309 port_a = 0x1; /* relative port A */
2310 port_b = 0x2; /* relative port B */
2311 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2312 (devip->channel & 0x7f);
2313 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2314 (devip->channel & 0x7f) + 0x80;
2315
2316 /*
2317 * The asymmetric access state is cycled according to the host_id.
2318 */
2319 n = 4;
2320 if (sdebug_vpd_use_hostno == 0) {
2321 arr[n++] = host_no % 3; /* Asymm access state */
2322 arr[n++] = 0x0F; /* claim: all states are supported */
2323 } else {
2324 arr[n++] = 0x0; /* Active/Optimized path */
2325 arr[n++] = 0x01; /* only support active/optimized paths */
2326 }
2327 put_unaligned_be16(port_group_a, arr + n);
2328 n += 2;
2329 arr[n++] = 0; /* Reserved */
2330 arr[n++] = 0; /* Status code */
2331 arr[n++] = 0; /* Vendor unique */
2332 arr[n++] = 0x1; /* One port per group */
2333 arr[n++] = 0; /* Reserved */
2334 arr[n++] = 0; /* Reserved */
2335 put_unaligned_be16(port_a, arr + n);
2336 n += 2;
2337 arr[n++] = 3; /* Port unavailable */
2338 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2339 put_unaligned_be16(port_group_b, arr + n);
2340 n += 2;
2341 arr[n++] = 0; /* Reserved */
2342 arr[n++] = 0; /* Status code */
2343 arr[n++] = 0; /* Vendor unique */
2344 arr[n++] = 0x1; /* One port per group */
2345 arr[n++] = 0; /* Reserved */
2346 arr[n++] = 0; /* Reserved */
2347 put_unaligned_be16(port_b, arr + n);
2348 n += 2;
2349
2350 rlen = n - 4;
2351 put_unaligned_be32(rlen, arr + 0);
2352
2353 /*
2354 * Return the smallest of:
2355 * - the allocated length,
2356 * - the constructed response length, and
2357 * - the maximum array size.
2358 */
2359 rlen = min(alen, n);
2360 ret = fill_from_dev_buffer(scp, arr,
2361 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2362 kfree(arr);
2363 return ret;
2364 }
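/*
 * Worked example of the group numbering above: on host_no 0, channel 0,
 * port group A is ((0 + 1) & 0x7f) << 8 = 0x100 and port group B is
 * 0x100 + 0x80 = 0x180. Group A carries the asymmetric access state
 * proper; group B is always reported unavailable (0x3).
 */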
2365
2366 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2367 struct sdebug_dev_info *devip)
2368 {
2369 bool rctd;
2370 u8 reporting_opts, req_opcode, sdeb_i, supp;
2371 u16 req_sa, u;
2372 u32 alloc_len, a_len;
2373 int k, offset, len, errsts, count, bump, na;
2374 const struct opcode_info_t *oip;
2375 const struct opcode_info_t *r_oip;
2376 u8 *arr;
2377 u8 *cmd = scp->cmnd;
2378
2379 rctd = !!(cmd[2] & 0x80);
2380 reporting_opts = cmd[2] & 0x7;
2381 req_opcode = cmd[3];
2382 req_sa = get_unaligned_be16(cmd + 4);
2383 alloc_len = get_unaligned_be32(cmd + 6);
2384 if (alloc_len < 4 || alloc_len > 0xffff) {
2385 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2386 return check_condition_result;
2387 }
2388 if (alloc_len > 8192)
2389 a_len = 8192;
2390 else
2391 a_len = alloc_len;
2392 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2393 if (NULL == arr) {
2394 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2395 INSUFF_RES_ASCQ);
2396 return check_condition_result;
2397 }
2398 switch (reporting_opts) {
2399 case 0: /* all commands */
2400 /* count number of commands */
2401 for (count = 0, oip = opcode_info_arr;
2402 oip->num_attached != 0xff; ++oip) {
2403 if (F_INV_OP & oip->flags)
2404 continue;
2405 count += (oip->num_attached + 1);
2406 }
2407 bump = rctd ? 20 : 8;
2408 put_unaligned_be32(count * bump, arr);
2409 for (offset = 4, oip = opcode_info_arr;
2410 oip->num_attached != 0xff && offset < a_len; ++oip) {
2411 if (F_INV_OP & oip->flags)
2412 continue;
2413 na = oip->num_attached;
2414 arr[offset] = oip->opcode;
2415 put_unaligned_be16(oip->sa, arr + offset + 2);
2416 if (rctd)
2417 arr[offset + 5] |= 0x2;
2418 if (FF_SA & oip->flags)
2419 arr[offset + 5] |= 0x1;
2420 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2421 if (rctd)
2422 put_unaligned_be16(0xa, arr + offset + 8);
2423 r_oip = oip;
2424 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2425 if (F_INV_OP & oip->flags)
2426 continue;
2427 offset += bump;
2428 arr[offset] = oip->opcode;
2429 put_unaligned_be16(oip->sa, arr + offset + 2);
2430 if (rctd)
2431 arr[offset + 5] |= 0x2;
2432 if (FF_SA & oip->flags)
2433 arr[offset + 5] |= 0x1;
2434 put_unaligned_be16(oip->len_mask[0],
2435 arr + offset + 6);
2436 if (rctd)
2437 put_unaligned_be16(0xa,
2438 arr + offset + 8);
2439 }
2440 oip = r_oip;
2441 offset += bump;
2442 }
2443 break;
2444 case 1: /* one command: opcode only */
2445 case 2: /* one command: opcode plus service action */
2446 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2447 sdeb_i = opcode_ind_arr[req_opcode];
2448 oip = &opcode_info_arr[sdeb_i];
2449 if (F_INV_OP & oip->flags) {
2450 supp = 1;
2451 offset = 4;
2452 } else {
2453 if (1 == reporting_opts) {
2454 if (FF_SA & oip->flags) {
2455 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2456 2, 2);
2457 kfree(arr);
2458 return check_condition_result;
2459 }
2460 req_sa = 0;
2461 } else if (2 == reporting_opts &&
2462 0 == (FF_SA & oip->flags)) {
2463 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* points at requested sa */
2464 kfree(arr);
2465 return check_condition_result;
2466 }
2467 if (0 == (FF_SA & oip->flags) &&
2468 req_opcode == oip->opcode)
2469 supp = 3;
2470 else if (0 == (FF_SA & oip->flags)) {
2471 na = oip->num_attached;
2472 for (k = 0, oip = oip->arrp; k < na;
2473 ++k, ++oip) {
2474 if (req_opcode == oip->opcode)
2475 break;
2476 }
2477 supp = (k >= na) ? 1 : 3;
2478 } else if (req_sa != oip->sa) {
2479 na = oip->num_attached;
2480 for (k = 0, oip = oip->arrp; k < na;
2481 ++k, ++oip) {
2482 if (req_sa == oip->sa)
2483 break;
2484 }
2485 supp = (k >= na) ? 1 : 3;
2486 } else
2487 supp = 3;
2488 if (3 == supp) {
2489 u = oip->len_mask[0];
2490 put_unaligned_be16(u, arr + 2);
2491 arr[4] = oip->opcode;
2492 for (k = 1; k < u; ++k)
2493 arr[4 + k] = (k < 16) ?
2494 oip->len_mask[k] : 0xff;
2495 offset = 4 + u;
2496 } else
2497 offset = 4;
2498 }
2499 arr[1] = (rctd ? 0x80 : 0) | supp;
2500 if (rctd) {
2501 put_unaligned_be16(0xa, arr + offset);
2502 offset += 12;
2503 }
2504 break;
2505 default:
2506 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2507 kfree(arr);
2508 return check_condition_result;
2509 }
2510 offset = (offset < a_len) ? offset : a_len;
2511 len = (offset < alloc_len) ? offset : alloc_len;
2512 errsts = fill_from_dev_buffer(scp, arr, len);
2513 kfree(arr);
2514 return errsts;
2515 }
2516
2517 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2518 struct sdebug_dev_info *devip)
2519 {
2520 bool repd;
2521 u32 alloc_len, len;
2522 u8 arr[16];
2523 u8 *cmd = scp->cmnd;
2524
2525 memset(arr, 0, sizeof(arr));
2526 repd = !!(cmd[2] & 0x80);
2527 alloc_len = get_unaligned_be32(cmd + 6);
2528 if (alloc_len < 4) {
2529 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2530 return check_condition_result;
2531 }
2532 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2533 arr[1] = 0x1; /* ITNRS */
2534 if (repd) {
2535 arr[3] = 0xc;
2536 len = 16;
2537 } else
2538 len = 4;
2539
2540 len = (len < alloc_len) ? len : alloc_len;
2541 return fill_from_dev_buffer(scp, arr, len);
2542 }
2543
2544 /* <<Following mode page info copied from ST318451LW>> */
2545
2546 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2547 { /* Read-Write Error Recovery page for mode_sense */
2548 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2549 5, 0, 0xff, 0xff};
2550
2551 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2552 if (1 == pcontrol)
2553 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2554 return sizeof(err_recov_pg);
2555 }
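/*
 * The pcontrol argument to this and the following resp_*_pg() helpers
 * follows the MODE SENSE PC field: 0 returns current values, 1 the
 * changeable-values mask (all zeros for this page), 2 default values
 * (pages without an explicit default just return their current values),
 * and 3 (saved values) is rejected earlier, in resp_mode_sense(), with
 * SAVING PARAMETERS NOT SUPPORTED.
 */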
2556
2557 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2558 { /* Disconnect-Reconnect page for mode_sense */
2559 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2560 0, 0, 0, 0, 0, 0, 0, 0};
2561
2562 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2563 if (1 == pcontrol)
2564 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2565 return sizeof(disconnect_pg);
2566 }
2567
2568 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2569 { /* Format device page for mode_sense */
2570 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2571 0, 0, 0, 0, 0, 0, 0, 0,
2572 0, 0, 0, 0, 0x40, 0, 0, 0};
2573
2574 memcpy(p, format_pg, sizeof(format_pg));
2575 put_unaligned_be16(sdebug_sectors_per, p + 10);
2576 put_unaligned_be16(sdebug_sector_size, p + 12);
2577 if (sdebug_removable)
2578 p[20] |= 0x20; /* should agree with INQUIRY */
2579 if (1 == pcontrol)
2580 memset(p + 2, 0, sizeof(format_pg) - 2);
2581 return sizeof(format_pg);
2582 }
2583
2584 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2585 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2586 0, 0, 0, 0};
2587
2588 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2589 { /* Caching page for mode_sense */
2590 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2591 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2592 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2593 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2594
2595 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2596 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2597 memcpy(p, caching_pg, sizeof(caching_pg));
2598 if (1 == pcontrol)
2599 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2600 else if (2 == pcontrol)
2601 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2602 return sizeof(caching_pg);
2603 }
2604
2605 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2606 0, 0, 0x2, 0x4b};
2607
2608 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2609 { /* Control mode page for mode_sense */
2610 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2611 0, 0, 0, 0};
2612 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2613 0, 0, 0x2, 0x4b};
2614
2615 if (sdebug_dsense)
2616 ctrl_m_pg[2] |= 0x4;
2617 else
2618 ctrl_m_pg[2] &= ~0x4;
2619
2620 if (sdebug_ato)
2621 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2622
2623 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2624 if (1 == pcontrol)
2625 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2626 else if (2 == pcontrol)
2627 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2628 return sizeof(ctrl_m_pg);
2629 }
2630
2631 /* IO Advice Hints Grouping mode page */
2632 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2633 {
2634 /* IO Advice Hints Grouping mode page */
2635 struct grouping_m_pg {
2636 u8 page_code; /* OR 0x40 when subpage_code > 0 */
2637 u8 subpage_code;
2638 __be16 page_length;
2639 u8 reserved[12];
2640 struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2641 };
2642 static const struct grouping_m_pg gr_m_pg = {
2643 .page_code = 0xa | 0x40,
2644 .subpage_code = 5,
2645 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2646 .descr = {
2647 { .st_enble = 1 },
2648 { .st_enble = 1 },
2649 { .st_enble = 1 },
2650 { .st_enble = 1 },
2651 { .st_enble = 1 },
2652 { .st_enble = 0 },
2653 }
2654 };
2655
2656 BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2657 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2658 memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2659 if (1 == pcontrol) {
2660 /* There are no changeable values so clear from byte 4 on. */
2661 memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2662 }
2663 return sizeof(gr_m_pg);
2664 }
2665
2666 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2667 { /* Informational Exceptions control mode page for mode_sense */
2668 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2669 0, 0, 0x0, 0x0};
2670 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2671 0, 0, 0x0, 0x0};
2672
2673 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2674 if (1 == pcontrol)
2675 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2676 else if (2 == pcontrol)
2677 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2678 return sizeof(iec_m_pg);
2679 }
2680
2681 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2682 { /* SAS SSP mode page - short format for mode_sense */
2683 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2684 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2685
2686 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2687 if (1 == pcontrol)
2688 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2689 return sizeof(sas_sf_m_pg);
2690 }
2691
2692
2693 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2694 int target_dev_id)
2695 { /* SAS phy control and discover mode page for mode_sense */
2696 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2697 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2698 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2699 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2700 0x2, 0, 0, 0, 0, 0, 0, 0,
2701 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2702 0, 0, 0, 0, 0, 0, 0, 0,
2703 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2704 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2705 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2706 0x3, 0, 0, 0, 0, 0, 0, 0,
2707 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2708 0, 0, 0, 0, 0, 0, 0, 0,
2709 };
2710 int port_a, port_b;
2711
2712 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2713 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2714 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2715 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2716 port_a = target_dev_id + 1;
2717 port_b = port_a + 1;
2718 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2719 put_unaligned_be32(port_a, p + 20);
2720 put_unaligned_be32(port_b, p + 48 + 20);
2721 if (1 == pcontrol)
2722 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2723 return sizeof(sas_pcd_m_pg);
2724 }
2725
2726 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2727 { /* SAS SSP shared protocol specific port mode subpage */
2728 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2729 0, 0, 0, 0, 0, 0, 0, 0,
2730 };
2731
2732 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2733 if (1 == pcontrol)
2734 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2735 return sizeof(sas_sha_m_pg);
2736 }
2737
2738 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2739 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2740
2741 static int resp_mode_sense(struct scsi_cmnd *scp,
2742 struct sdebug_dev_info *devip)
2743 {
2744 int pcontrol, pcode, subpcode, bd_len;
2745 unsigned char dev_spec;
2746 u32 alloc_len, offset, len;
2747 int target_dev_id;
2748 int target = scp->device->id;
2749 unsigned char *ap;
2750 unsigned char *arr __free(kfree);
2751 unsigned char *cmd = scp->cmnd;
2752 bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
2753
2754 arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2755 if (!arr)
2756 return -ENOMEM;
2757 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2758 pcontrol = (cmd[2] & 0xc0) >> 6;
2759 pcode = cmd[2] & 0x3f;
2760 subpcode = cmd[3];
2761 msense_6 = (MODE_SENSE == cmd[0]);
2762 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2763 is_disk = (sdebug_ptype == TYPE_DISK);
2764 is_zbc = devip->zoned;
2765 is_tape = (sdebug_ptype == TYPE_TAPE);
2766 if ((is_disk || is_zbc || is_tape) && !dbd)
2767 bd_len = llbaa ? 16 : 8;
2768 else
2769 bd_len = 0;
2770 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2771 if (0x3 == pcontrol) { /* Saving values not supported */
2772 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2773 return check_condition_result;
2774 }
2775 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2776 (devip->target * 1000) - 3;
2777 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2778 if (is_disk || is_zbc) {
2779 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2780 if (sdebug_wp)
2781 dev_spec |= 0x80;
2782 } else
2783 dev_spec = 0x0;
2784 if (msense_6) {
2785 arr[2] = dev_spec;
2786 arr[3] = bd_len;
2787 offset = 4;
2788 } else {
2789 arr[3] = dev_spec;
2790 if (16 == bd_len)
2791 arr[4] = 0x1; /* set LONGLBA bit */
2792 arr[7] = bd_len; /* assume 255 or less */
2793 offset = 8;
2794 }
2795 ap = arr + offset;
2796 if ((bd_len > 0) && (!sdebug_capacity))
2797 sdebug_capacity = get_sdebug_capacity();
2798
2799 if (8 == bd_len) {
2800 if (sdebug_capacity > 0xfffffffe)
2801 put_unaligned_be32(0xffffffff, ap + 0);
2802 else
2803 put_unaligned_be32(sdebug_capacity, ap + 0);
2804 if (is_tape) {
2805 ap[0] = devip->tape_density;
2806 put_unaligned_be16(devip->tape_blksize, ap + 6);
2807 } else
2808 put_unaligned_be16(sdebug_sector_size, ap + 6);
2809 offset += bd_len;
2810 ap = arr + offset;
2811 } else if (16 == bd_len) {
2812 if (is_tape) {
2813 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
2814 return check_condition_result;
2815 }
2816 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2817 put_unaligned_be32(sdebug_sector_size, ap + 12);
2818 offset += bd_len;
2819 ap = arr + offset;
2820 }
2821 if (cmd[2] == 0)
2822 goto only_bd; /* Only block descriptor requested */
2823
2824 /*
2825 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2826 * len += resp_*_pg(ap + len, pcontrol, target);
2827 */
2828 switch (pcode) {
2829 case 0x1: /* Read-Write error recovery page, direct access */
2830 if (subpcode > 0x0 && subpcode < 0xff)
2831 goto bad_subpcode;
2832 len = resp_err_recov_pg(ap, pcontrol, target);
2833 offset += len;
2834 break;
2835 case 0x2: /* Disconnect-Reconnect page, all devices */
2836 if (subpcode > 0x0 && subpcode < 0xff)
2837 goto bad_subpcode;
2838 len = resp_disconnect_pg(ap, pcontrol, target);
2839 offset += len;
2840 break;
2841 case 0x3: /* Format device page, direct access */
2842 if (subpcode > 0x0 && subpcode < 0xff)
2843 goto bad_subpcode;
2844 if (is_disk) {
2845 len = resp_format_pg(ap, pcontrol, target);
2846 offset += len;
2847 } else {
2848 goto bad_pcode;
2849 }
2850 break;
2851 case 0x8: /* Caching page, direct access */
2852 if (subpcode > 0x0 && subpcode < 0xff)
2853 goto bad_subpcode;
2854 if (is_disk || is_zbc) {
2855 len = resp_caching_pg(ap, pcontrol, target);
2856 offset += len;
2857 } else {
2858 goto bad_pcode;
2859 }
2860 break;
2861 case 0xa: /* Control Mode page, all devices */
2862 switch (subpcode) {
2863 case 0:
2864 len = resp_ctrl_m_pg(ap, pcontrol, target);
2865 break;
2866 case 0x05:
2867 len = resp_grouping_m_pg(ap, pcontrol, target);
2868 break;
2869 case 0xff:
2870 len = resp_ctrl_m_pg(ap, pcontrol, target);
2871 len += resp_grouping_m_pg(ap + len, pcontrol, target);
2872 break;
2873 default:
2874 goto bad_subpcode;
2875 }
2876 offset += len;
2877 break;
2878 case 0x19: /* if spc==1 then sas phy, control+discover */
2879 if (subpcode > 0x2 && subpcode < 0xff)
2880 goto bad_subpcode;
2881 len = 0;
2882 if ((0x0 == subpcode) || (0xff == subpcode))
2883 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2884 if ((0x1 == subpcode) || (0xff == subpcode))
2885 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2886 target_dev_id);
2887 if ((0x2 == subpcode) || (0xff == subpcode))
2888 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2889 offset += len;
2890 break;
2891 case 0x1c: /* Informational Exceptions Mode page, all devices */
2892 if (subpcode > 0x0 && subpcode < 0xff)
2893 goto bad_subpcode;
2894 len = resp_iec_m_pg(ap, pcontrol, target);
2895 offset += len;
2896 break;
2897 case 0x3f: /* Read all Mode pages */
2898 if (subpcode > 0x0 && subpcode < 0xff)
2899 goto bad_subpcode;
2900 len = resp_err_recov_pg(ap, pcontrol, target);
2901 len += resp_disconnect_pg(ap + len, pcontrol, target);
2902 if (is_disk) {
2903 len += resp_format_pg(ap + len, pcontrol, target);
2904 len += resp_caching_pg(ap + len, pcontrol, target);
2905 } else if (is_zbc) {
2906 len += resp_caching_pg(ap + len, pcontrol, target);
2907 }
2908 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2909 if (0xff == subpcode)
2910 len += resp_grouping_m_pg(ap + len, pcontrol, target);
2911 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2912 if (0xff == subpcode) {
2913 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2914 target_dev_id);
2915 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2916 }
2917 len += resp_iec_m_pg(ap + len, pcontrol, target);
2918 offset += len;
2919 break;
2920 default:
2921 goto bad_pcode;
2922 }
2923 only_bd:
2924 if (msense_6)
2925 arr[0] = offset - 1;
2926 else
2927 put_unaligned_be16((offset - 2), arr + 0);
2928 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2929
2930 bad_pcode:
2931 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2932 return check_condition_result;
2933
2934 bad_subpcode:
2935 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2936 return check_condition_result;
2937 }
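/*
 * Header arithmetic example for the code above: with an 8-byte block
 * descriptor and a 12-byte mode page, MODE SENSE(6) returns
 * 4 + 8 + 12 = 24 bytes with MODE DATA LENGTH 23 (the field excludes
 * itself), while MODE SENSE(10) returns 8 + 8 + 12 = 28 bytes with
 * MODE DATA LENGTH 26 (the field excludes its own two bytes).
 */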
2938
2939 #define SDEBUG_MAX_MSELECT_SZ 512
2940
2941 static int resp_mode_select(struct scsi_cmnd *scp,
2942 struct sdebug_dev_info *devip)
2943 {
2944 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2945 int param_len, res, mpage;
2946 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2947 unsigned char *cmd = scp->cmnd;
2948 int mselect6 = (MODE_SELECT == cmd[0]);
2949
2950 memset(arr, 0, sizeof(arr));
2951 pf = cmd[1] & 0x10;
2952 sp = cmd[1] & 0x1;
2953 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2954 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2955 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2956 return check_condition_result;
2957 }
2958 res = fetch_to_dev_buffer(scp, arr, param_len);
2959 if (-1 == res)
2960 return DID_ERROR << 16;
2961 else if (sdebug_verbose && (res < param_len))
2962 sdev_printk(KERN_INFO, scp->device,
2963 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2964 __func__, param_len, res);
2965 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2966 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2967 off = (mselect6 ? 4 : 8);
2968 if (sdebug_ptype == TYPE_TAPE) {
2969 int blksize;
2970
2971 if (bd_len != 8) {
2972 mk_sense_invalid_fld(scp, SDEB_IN_DATA,
2973 mselect6 ? 3 : 6, -1);
2974 return check_condition_result;
2975 }
2976 blksize = get_unaligned_be16(arr + off + 6);
2977 if ((blksize % 4) != 0) {
2978 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off + 6, -1);
2979 return check_condition_result;
2980 }
2981 devip->tape_density = arr[off];
2982 devip->tape_blksize = blksize;
2983 }
2984 off += bd_len;
2985 if (off >= res)
2986 return 0; /* No page written, just descriptors */
2987 if (md_len > 2) {
2988 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2989 return check_condition_result;
2990 }
2991 mpage = arr[off] & 0x3f;
2992 ps = !!(arr[off] & 0x80);
2993 if (ps) {
2994 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2995 return check_condition_result;
2996 }
2997 spf = !!(arr[off] & 0x40);
2998 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2999 (arr[off + 1] + 2);
3000 if ((pg_len + off) > param_len) {
3001 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3002 PARAMETER_LIST_LENGTH_ERR, 0);
3003 return check_condition_result;
3004 }
3005 switch (mpage) {
3006 case 0x8: /* Caching Mode page */
3007 if (caching_pg[1] == arr[off + 1]) {
3008 memcpy(caching_pg + 2, arr + off + 2,
3009 sizeof(caching_pg) - 2);
3010 goto set_mode_changed_ua;
3011 }
3012 break;
3013 case 0xa: /* Control Mode page */
3014 if (ctrl_m_pg[1] == arr[off + 1]) {
3015 memcpy(ctrl_m_pg + 2, arr + off + 2,
3016 sizeof(ctrl_m_pg) - 2);
3017 if (ctrl_m_pg[4] & 0x8)
3018 sdebug_wp = true;
3019 else
3020 sdebug_wp = false;
3021 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
3022 goto set_mode_changed_ua;
3023 }
3024 break;
3025 case 0x1c: /* Informational Exceptions Mode page */
3026 if (iec_m_pg[1] == arr[off + 1]) {
3027 memcpy(iec_m_pg + 2, arr + off + 2,
3028 sizeof(iec_m_pg) - 2);
3029 goto set_mode_changed_ua;
3030 }
3031 break;
3032 default:
3033 break;
3034 }
3035 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
3036 return check_condition_result;
3037 set_mode_changed_ua:
3038 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3039 return 0;
3040 }
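/*
 * Example parameter list accepted by the code above (hypothetical bytes):
 * a MODE SELECT(6) with PF=1, SP=0 and parameter list length 16 that sets
 * D_SENSE in the control mode page. Four header bytes (no block
 * descriptors), then the 12-byte page:
 *
 *	00 00 00 00  0a 0a 06 00 00 00 00 00 00 00 02 4b
 *
 * Byte 2 of the page (0x06) is the current value 0x02 with the 0x04
 * D_SENSE bit added; the code copies it into ctrl_m_pg[], flips
 * sdebug_dsense accordingly, then raises the MODE CHANGED unit attention.
 */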
3041
3042 static int resp_temp_l_pg(unsigned char *arr)
3043 {
3044 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3045 0x0, 0x1, 0x3, 0x2, 0x0, 65,
3046 };
3047
3048 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3049 return sizeof(temp_l_pg);
3050 }
3051
3052 static int resp_ie_l_pg(unsigned char *arr)
3053 {
3054 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3055 };
3056
3057 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3058 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
3059 arr[4] = THRESHOLD_EXCEEDED;
3060 arr[5] = 0xff;
3061 }
3062 return sizeof(ie_l_pg);
3063 }
3064
3065 static int resp_env_rep_l_spg(unsigned char *arr)
3066 {
3067 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3068 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3069 0x1, 0x0, 0x23, 0x8,
3070 0x0, 55, 72, 35, 55, 45, 0, 0,
3071 };
3072
3073 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3074 return sizeof(env_rep_l_spg);
3075 }
3076
3077 #define SDEBUG_MAX_LSENSE_SZ 512
3078
3079 static int resp_log_sense(struct scsi_cmnd *scp,
3080 struct sdebug_dev_info *devip)
3081 {
3082 int ppc, sp, pcode, subpcode;
3083 u32 alloc_len, len, n;
3084 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3085 unsigned char *cmd = scp->cmnd;
3086
3087 memset(arr, 0, sizeof(arr));
3088 ppc = cmd[1] & 0x2;
3089 sp = cmd[1] & 0x1;
3090 if (ppc || sp) {
3091 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3092 return check_condition_result;
3093 }
3094 pcode = cmd[2] & 0x3f;
3095 subpcode = cmd[3] & 0xff;
3096 alloc_len = get_unaligned_be16(cmd + 7);
3097 arr[0] = pcode;
3098 if (0 == subpcode) {
3099 switch (pcode) {
3100 case 0x0: /* Supported log pages log page */
3101 n = 4;
3102 arr[n++] = 0x0; /* this page */
3103 arr[n++] = 0xd; /* Temperature */
3104 arr[n++] = 0x2f; /* Informational exceptions */
3105 arr[3] = n - 4;
3106 break;
3107 case 0xd: /* Temperature log page */
3108 arr[3] = resp_temp_l_pg(arr + 4);
3109 break;
3110 case 0x2f: /* Informational exceptions log page */
3111 arr[3] = resp_ie_l_pg(arr + 4);
3112 break;
3113 default:
3114 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3115 return check_condition_result;
3116 }
3117 } else if (0xff == subpcode) {
3118 arr[0] |= 0x40;
3119 arr[1] = subpcode;
3120 switch (pcode) {
3121 case 0x0: /* Supported log pages and subpages log page */
3122 n = 4;
3123 arr[n++] = 0x0;
3124 arr[n++] = 0x0; /* 0,0 page */
3125 arr[n++] = 0x0;
3126 arr[n++] = 0xff; /* this page */
3127 arr[n++] = 0xd;
3128 arr[n++] = 0x0; /* Temperature */
3129 arr[n++] = 0xd;
3130 arr[n++] = 0x1; /* Environment reporting */
3131 arr[n++] = 0xd;
3132 arr[n++] = 0xff; /* all 0xd subpages */
3133 arr[n++] = 0x2f;
3134 arr[n++] = 0x0; /* Informational exceptions */
3135 arr[n++] = 0x2f;
3136 arr[n++] = 0xff; /* all 0x2f subpages */
3137 arr[3] = n - 4;
3138 break;
3139 case 0xd: /* Temperature subpages */
3140 n = 4;
3141 arr[n++] = 0xd;
3142 arr[n++] = 0x0; /* Temperature */
3143 arr[n++] = 0xd;
3144 arr[n++] = 0x1; /* Environment reporting */
3145 arr[n++] = 0xd;
3146 arr[n++] = 0xff; /* these subpages */
3147 arr[3] = n - 4;
3148 break;
3149 case 0x2f: /* Informational exceptions subpages */
3150 n = 4;
3151 arr[n++] = 0x2f;
3152 arr[n++] = 0x0; /* Informational exceptions */
3153 arr[n++] = 0x2f;
3154 arr[n++] = 0xff; /* these subpages */
3155 arr[3] = n - 4;
3156 break;
3157 default:
3158 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3159 return check_condition_result;
3160 }
3161 } else if (subpcode > 0) {
3162 arr[0] |= 0x40;
3163 arr[1] = subpcode;
3164 if (pcode == 0xd && subpcode == 1)
3165 arr[3] = resp_env_rep_l_spg(arr + 4);
3166 else {
3167 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3168 return check_condition_result;
3169 }
3170 } else {
3171 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3172 return check_condition_result;
3173 }
3174 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3175 return fill_from_dev_buffer(scp, arr,
3176 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3177 }
3178
3179 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3180 {
3181 return devip->nr_zones != 0;
3182 }
3183
3184 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3185 unsigned long long lba)
3186 {
3187 u32 zno = div_u64(lba, devip->zsize);
3188 struct sdeb_zone_state *zsp;
3189
3190 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3191 return &devip->zstate[zno];
3192
3193 /*
3194 * If the zone capacity is less than the zone size, adjust for gap
3195 * zones.
3196 */
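/*
 * Worked example (hypothetical layout): with nr_conv_zones == 1, each
 * zone-size slot past the conventional zones holds one sequential zone
 * followed by one gap zone, so slot zno == 3 maps to zstate index
 * 2 * 3 - 1 == 5; the conditional zsp++ below then steps to the
 * trailing gap zone when lba lies beyond the sequential zone itself.
 */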
3197 zno = 2 * zno - devip->nr_conv_zones;
3198 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3199 zsp = &devip->zstate[zno];
3200 if (lba >= zsp->z_start + zsp->z_size)
3201 zsp++;
3202 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3203 return zsp;
3204 }
3205
3206 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3207 {
3208 return zsp->z_type == ZBC_ZTYPE_CNV;
3209 }
3210
3211 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3212 {
3213 return zsp->z_type == ZBC_ZTYPE_GAP;
3214 }
3215
3216 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3217 {
3218 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3219 }
3220
3221 static void zbc_close_zone(struct sdebug_dev_info *devip,
3222 struct sdeb_zone_state *zsp)
3223 {
3224 enum sdebug_z_cond zc;
3225
3226 if (!zbc_zone_is_seq(zsp))
3227 return;
3228
3229 zc = zsp->z_cond;
3230 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3231 return;
3232
3233 if (zc == ZC2_IMPLICIT_OPEN)
3234 devip->nr_imp_open--;
3235 else
3236 devip->nr_exp_open--;
3237
3238 if (zsp->z_wp == zsp->z_start) {
3239 zsp->z_cond = ZC1_EMPTY;
3240 } else {
3241 zsp->z_cond = ZC4_CLOSED;
3242 devip->nr_closed++;
3243 }
3244 }
3245
3246 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3247 {
3248 struct sdeb_zone_state *zsp = &devip->zstate[0];
3249 unsigned int i;
3250
3251 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3252 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3253 zbc_close_zone(devip, zsp);
3254 return;
3255 }
3256 }
3257 }
3258
3259 static void zbc_open_zone(struct sdebug_dev_info *devip,
3260 struct sdeb_zone_state *zsp, bool explicit)
3261 {
3262 enum sdebug_z_cond zc;
3263
3264 if (!zbc_zone_is_seq(zsp))
3265 return;
3266
3267 zc = zsp->z_cond;
3268 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3269 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3270 return;
3271
3272 /* Close an implicit open zone if necessary */
3273 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3274 zbc_close_zone(devip, zsp);
3275 else if (devip->max_open &&
3276 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3277 zbc_close_imp_open_zone(devip);
3278
3279 if (zsp->z_cond == ZC4_CLOSED)
3280 devip->nr_closed--;
3281 if (explicit) {
3282 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3283 devip->nr_exp_open++;
3284 } else {
3285 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3286 devip->nr_imp_open++;
3287 }
3288 }
3289
3290 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3291 struct sdeb_zone_state *zsp)
3292 {
3293 switch (zsp->z_cond) {
3294 case ZC2_IMPLICIT_OPEN:
3295 devip->nr_imp_open--;
3296 break;
3297 case ZC3_EXPLICIT_OPEN:
3298 devip->nr_exp_open--;
3299 break;
3300 default:
3301 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3302 zsp->z_start, zsp->z_cond);
3303 break;
3304 }
3305 zsp->z_cond = ZC5_FULL;
3306 }
3307
3308 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3309 unsigned long long lba, unsigned int num)
3310 {
3311 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3312 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3313
3314 if (!zbc_zone_is_seq(zsp))
3315 return;
3316
3317 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3318 zsp->z_wp += num;
3319 if (zsp->z_wp >= zend)
3320 zbc_set_zone_full(devip, zsp);
3321 return;
3322 }
3323
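/*
 * Sequential write preferred zone: writes may land anywhere in the
 * zone and may spill into following zones, so walk the zones touched,
 * advancing each write pointer only when the write extends past it and
 * flagging out-of-order writes as using a non-sequential resource.
 */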
3324 while (num) {
3325 if (lba != zsp->z_wp)
3326 zsp->z_non_seq_resource = true;
3327
3328 end = lba + num;
3329 if (end >= zend) {
3330 n = zend - lba;
3331 zsp->z_wp = zend;
3332 } else if (end > zsp->z_wp) {
3333 n = num;
3334 zsp->z_wp = end;
3335 } else {
3336 n = num;
3337 }
3338 if (zsp->z_wp >= zend)
3339 zbc_set_zone_full(devip, zsp);
3340
3341 num -= n;
3342 lba += n;
3343 if (num) {
3344 zsp++;
3345 zend = zsp->z_start + zsp->z_size;
3346 }
3347 }
3348 }
3349
3350 static int check_zbc_access_params(struct scsi_cmnd *scp,
3351 unsigned long long lba, unsigned int num, bool write)
3352 {
3353 struct scsi_device *sdp = scp->device;
3354 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3355 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3356 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3357
3358 if (!write) {
3359 /* For host-managed, reads cannot cross zone types boundaries */
3360 if (zsp->z_type != zsp_end->z_type) {
3361 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3362 LBA_OUT_OF_RANGE,
3363 READ_INVDATA_ASCQ);
3364 return check_condition_result;
3365 }
3366 return 0;
3367 }
3368
3369 /* Writing into a gap zone is not allowed */
3370 if (zbc_zone_is_gap(zsp)) {
3371 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3372 ATTEMPT_ACCESS_GAP);
3373 return check_condition_result;
3374 }
3375
3376 /* No restrictions for writes within conventional zones */
3377 if (zbc_zone_is_conv(zsp)) {
3378 if (!zbc_zone_is_conv(zsp_end)) {
3379 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3380 LBA_OUT_OF_RANGE,
3381 WRITE_BOUNDARY_ASCQ);
3382 return check_condition_result;
3383 }
3384 return 0;
3385 }
3386
3387 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3388 /* Writes cannot cross sequential zone boundaries */
3389 if (zsp_end != zsp) {
3390 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3391 LBA_OUT_OF_RANGE,
3392 WRITE_BOUNDARY_ASCQ);
3393 return check_condition_result;
3394 }
3395 /* Cannot write full zones */
3396 if (zsp->z_cond == ZC5_FULL) {
3397 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3398 INVALID_FIELD_IN_CDB, 0);
3399 return check_condition_result;
3400 }
3401 /* Writes must be aligned to the zone WP */
3402 if (lba != zsp->z_wp) {
3403 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3404 LBA_OUT_OF_RANGE,
3405 UNALIGNED_WRITE_ASCQ);
3406 return check_condition_result;
3407 }
3408 }
3409
3410 /* Handle implicit open of closed and empty zones */
3411 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3412 if (devip->max_open &&
3413 devip->nr_exp_open >= devip->max_open) {
3414 mk_sense_buffer(scp, DATA_PROTECT,
3415 INSUFF_RES_ASC,
3416 INSUFF_ZONE_ASCQ);
3417 return check_condition_result;
3418 }
3419 zbc_open_zone(devip, zsp, false);
3420 }
3421
3422 return 0;
3423 }
3424
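/*
 * Common access checks for media-access commands, applied in order:
 * the LBA range against the (virtual) capacity, the transfer length
 * against the backing store size, the software write-protect switch
 * for writes, and finally the zone checks for zoned devices.
 */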
3425 static inline int check_device_access_params
3426 (struct scsi_cmnd *scp, unsigned long long lba,
3427 unsigned int num, bool write)
3428 {
3429 struct scsi_device *sdp = scp->device;
3430 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3431
3432 if (lba + num > sdebug_capacity) {
3433 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3434 return check_condition_result;
3435 }
3436 /* transfer length excessive (tie in to block limits VPD page) */
3437 if (num > sdebug_store_sectors) {
3438 /* needs work to find which cdb byte 'num' comes from */
3439 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3440 return check_condition_result;
3441 }
3442 if (write && unlikely(sdebug_wp)) {
3443 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3444 return check_condition_result;
3445 }
3446 if (sdebug_dev_is_zoned(devip))
3447 return check_zbc_access_params(scp, lba, num, write);
3448
3449 return 0;
3450 }
3451
3452 /*
3453 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3454 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3455 * that access any of the "stores" in struct sdeb_store_info should call this
3456 * function with bug_if_fake_rw set to true.
3457 */
3458 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3459 bool bug_if_fake_rw)
3460 {
3461 if (sdebug_fake_rw) {
3462 BUG_ON(bug_if_fake_rw); /* See note above */
3463 return NULL;
3464 }
3465 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3466 }
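/*
 * Typical use (sketch): response functions that touch the backing
 * store call devip2sip(devip, true) and may then dereference
 * sip->storep; callers that tolerate fake_rw pass false and must be
 * prepared for a NULL return.
 */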
3467
3468 static inline void
3469 sdeb_read_lock(rwlock_t *lock)
3470 {
3471 if (sdebug_no_rwlock)
3472 __acquire(lock);
3473 else
3474 read_lock(lock);
3475 }
3476
3477 static inline void
3478 sdeb_read_unlock(rwlock_t *lock)
3479 {
3480 if (sdebug_no_rwlock)
3481 __release(lock);
3482 else
3483 read_unlock(lock);
3484 }
3485
3486 static inline void
3487 sdeb_write_lock(rwlock_t *lock)
3488 {
3489 if (sdebug_no_rwlock)
3490 __acquire(lock);
3491 else
3492 write_lock(lock);
3493 }
3494
3495 static inline void
3496 sdeb_write_unlock(rwlock_t *lock)
3497 {
3498 if (sdebug_no_rwlock)
3499 __release(lock);
3500 else
3501 write_unlock(lock);
3502 }
3503
3504 static inline void
3505 sdeb_data_read_lock(struct sdeb_store_info *sip)
3506 {
3507 BUG_ON(!sip);
3508
3509 sdeb_read_lock(&sip->macc_data_lck);
3510 }
3511
3512 static inline void
3513 sdeb_data_read_unlock(struct sdeb_store_info *sip)
3514 {
3515 BUG_ON(!sip);
3516
3517 sdeb_read_unlock(&sip->macc_data_lck);
3518 }
3519
3520 static inline void
3521 sdeb_data_write_lock(struct sdeb_store_info *sip)
3522 {
3523 BUG_ON(!sip);
3524
3525 sdeb_write_lock(&sip->macc_data_lck);
3526 }
3527
3528 static inline void
3529 sdeb_data_write_unlock(struct sdeb_store_info *sip)
3530 {
3531 BUG_ON(!sip);
3532
3533 sdeb_write_unlock(&sip->macc_data_lck);
3534 }
3535
3536 static inline void
3537 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
3538 {
3539 BUG_ON(!sip);
3540
3541 sdeb_read_lock(&sip->macc_sector_lck);
3542 }
3543
3544 static inline void
3545 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
3546 {
3547 BUG_ON(!sip);
3548
3549 sdeb_read_unlock(&sip->macc_sector_lck);
3550 }
3551
3552 static inline void
3553 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
3554 {
3555 BUG_ON(!sip);
3556
3557 sdeb_write_lock(&sip->macc_sector_lck);
3558 }
3559
3560 static inline void
3561 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
3562 {
3563 BUG_ON(!sip);
3564
3565 sdeb_write_unlock(&sip->macc_sector_lck);
3566 }
3567
3568 /*
3569 * Atomic locking:
3570 * We simplify the atomic model to allow only 1x atomic write and many non-
3571 * atomic reads or writes for all LBAs.
3572
3573 * An RW lock has similar behaviour:
3574 * Only 1x writer and many readers.
3575
3576 * So use a RW lock for per-device read and write locking:
3577 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
3578 * as a reader.
3579 */
3580
3581 static inline void
3582 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
3583 {
3584 if (atomic)
3585 sdeb_data_write_lock(sip);
3586 else
3587 sdeb_data_read_lock(sip);
3588 }
3589
3590 static inline void
3591 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
3592 {
3593 if (atomic)
3594 sdeb_data_write_unlock(sip);
3595 else
3596 sdeb_data_read_unlock(sip);
3597 }
3598
3599 /* Allow many reads but only 1x write per sector */
3600 static inline void
3601 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
3602 {
3603 if (do_write)
3604 sdeb_data_sector_write_lock(sip);
3605 else
3606 sdeb_data_sector_read_lock(sip);
3607 }
3608
3609 static inline void
3610 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
3611 {
3612 if (do_write)
3613 sdeb_data_sector_write_unlock(sip);
3614 else
3615 sdeb_data_sector_read_unlock(sip);
3616 }
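/*
 * Illustrative sketch only (not built): the lock nesting that
 * do_device_access() below applies around each sector copy. The
 * function name and flow here are hypothetical.
 */
#if 0
static void sdeb_lock_nesting_example(struct sdeb_store_info *sip,
				      bool atomic, bool do_write)
{
	/* writer if atomic (sole access), reader otherwise (shared) */
	sdeb_data_lock(sip, atomic);
	/* many concurrent readers, but only one writer per sector */
	sdeb_data_sector_lock(sip, do_write);
	/* ... copy one sector to or from sip->storep ... */
	sdeb_data_sector_unlock(sip, do_write);
	sdeb_data_unlock(sip, atomic);
}
#endif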
3617
3618 static inline void
3619 sdeb_meta_read_lock(struct sdeb_store_info *sip)
3620 {
3621 if (sdebug_no_rwlock) {
3622 if (sip)
3623 __acquire(&sip->macc_meta_lck);
3624 else
3625 __acquire(&sdeb_fake_rw_lck);
3626 } else {
3627 if (sip)
3628 read_lock(&sip->macc_meta_lck);
3629 else
3630 read_lock(&sdeb_fake_rw_lck);
3631 }
3632 }
3633
3634 static inline void
3635 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
3636 {
3637 if (sdebug_no_rwlock) {
3638 if (sip)
3639 __release(&sip->macc_meta_lck);
3640 else
3641 __release(&sdeb_fake_rw_lck);
3642 } else {
3643 if (sip)
3644 read_unlock(&sip->macc_meta_lck);
3645 else
3646 read_unlock(&sdeb_fake_rw_lck);
3647 }
3648 }
3649
3650 static inline void
3651 sdeb_meta_write_lock(struct sdeb_store_info *sip)
3652 {
3653 if (sdebug_no_rwlock) {
3654 if (sip)
3655 __acquire(&sip->macc_meta_lck);
3656 else
3657 __acquire(&sdeb_fake_rw_lck);
3658 } else {
3659 if (sip)
3660 write_lock(&sip->macc_meta_lck);
3661 else
3662 write_lock(&sdeb_fake_rw_lck);
3663 }
3664 }
3665
3666 static inline void
3667 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
3668 {
3669 if (sdebug_no_rwlock) {
3670 if (sip)
3671 __release(&sip->macc_meta_lck);
3672 else
3673 __release(&sdeb_fake_rw_lck);
3674 } else {
3675 if (sip)
3676 write_unlock(&sip->macc_meta_lck);
3677 else
3678 write_unlock(&sdeb_fake_rw_lck);
3679 }
3680 }
3681
3682 /* Returns number of bytes copied or -1 if error. */
3683 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3684 u32 sg_skip, u64 lba, u32 num, u8 group_number,
3685 bool do_write, bool atomic)
3686 {
3687 int ret;
3688 u64 block;
3689 enum dma_data_direction dir;
3690 struct scsi_data_buffer *sdb = &scp->sdb;
3691 u8 *fsp;
3692 int i, total = 0;
3693
3694 /*
3695 * Even though reads are inherently atomic (in this driver), we expect
3696 * the atomic flag only for writes.
3697 */
3698 if (!do_write && atomic)
3699 return -1;
3700
3701 if (do_write) {
3702 dir = DMA_TO_DEVICE;
3703 write_since_sync = true;
3704 } else {
3705 dir = DMA_FROM_DEVICE;
3706 }
3707
3708 if (!sdb->length || !sip)
3709 return 0;
3710 if (scp->sc_data_direction != dir)
3711 return -1;
3712
3713 if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3714 atomic_long_inc(&writes_by_group_number[group_number]);
3715
3716 fsp = sip->storep;
3717
3718 block = do_div(lba, sdebug_store_sectors);
3719
3720 /* Only allow 1x atomic write or multiple non-atomic writes at any given time */
3721 sdeb_data_lock(sip, atomic);
3722 for (i = 0; i < num; i++) {
3723 /* We shouldn't need to lock for atomic writes, but do it anyway */
3724 sdeb_data_sector_lock(sip, do_write);
3725 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3726 fsp + (block * sdebug_sector_size),
3727 sdebug_sector_size, sg_skip, do_write);
3728 sdeb_data_sector_unlock(sip, do_write);
3729 total += ret;
3730 if (ret != sdebug_sector_size)
3731 break;
3732 sg_skip += sdebug_sector_size;
3733 if (++block >= sdebug_store_sectors)
3734 block = 0;
3735 }
3736 sdeb_data_unlock(sip, atomic);
3737
3738 return total;
3739 }
3740
3741 /* Returns number of bytes copied or -1 if error. */
3742 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3743 {
3744 struct scsi_data_buffer *sdb = &scp->sdb;
3745
3746 if (!sdb->length)
3747 return 0;
3748 if (scp->sc_data_direction != DMA_TO_DEVICE)
3749 return -1;
3750 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3751 num * sdebug_sector_size, 0, true);
3752 }
3753
3754 /* If the first num blocks at sip->storep+lba compare equal to the first
3755 * half of arr, then copy the second half of arr into sip->storep+lba and
3756 * return true. If the comparison fails then return false. */
3757 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3758 const u8 *arr, bool compare_only)
3759 {
3760 bool res;
3761 u64 block, rest = 0;
3762 u32 store_blks = sdebug_store_sectors;
3763 u32 lb_size = sdebug_sector_size;
3764 u8 *fsp = sip->storep;
3765
3766 block = do_div(lba, store_blks);
3767 if (block + num > store_blks)
3768 rest = block + num - store_blks;
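/*
 * Wrap-around example (hypothetical numbers): with store_blks ==
 * 0x100000, block == 0xffffe and num == 4, rest == 2: two blocks are
 * compared (and copied) at the end of the store and the remaining two
 * wrap around to its start.
 */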
3769
3770 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3771 if (!res)
3772 return res;
3773 if (rest)
3774 res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3775 rest * lb_size);
3776 if (!res)
3777 return res;
3778 if (compare_only)
3779 return true;
3780 arr += num * lb_size;
3781 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3782 if (rest)
3783 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3784 return res;
3785 }
3786
3787 static __be16 dif_compute_csum(const void *buf, int len)
3788 {
3789 __be16 csum;
3790
3791 if (sdebug_guard)
3792 csum = (__force __be16)ip_compute_csum(buf, len);
3793 else
3794 csum = cpu_to_be16(crc_t10dif(buf, len));
3795
3796 return csum;
3797 }
3798
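/*
 * Returns 0 when the tuple verifies, 0x01 for a guard tag mismatch and
 * 0x03 for a reference tag mismatch; callers pass these values straight
 * through as the sense-data ASCQ for ASC 0x10.
 */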
3799 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3800 sector_t sector, u32 ei_lba)
3801 {
3802 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3803
3804 if (sdt->guard_tag != csum) {
3805 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3806 (unsigned long)sector,
3807 be16_to_cpu(sdt->guard_tag),
3808 be16_to_cpu(csum));
3809 return 0x01;
3810 }
3811 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3812 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3813 pr_err("REF check failed on sector %lu\n",
3814 (unsigned long)sector);
3815 return 0x03;
3816 }
3817 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3818 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3819 pr_err("REF check failed on sector %lu\n",
3820 (unsigned long)sector);
3821 return 0x03;
3822 }
3823 return 0;
3824 }
3825
3826 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3827 unsigned int sectors, bool read)
3828 {
3829 size_t resid;
3830 void *paddr;
3831 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3832 scp->device->hostdata, true);
3833 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3834 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3835 struct sg_mapping_iter miter;
3836
3837 /* Bytes of protection data to copy into sgl */
3838 resid = sectors * sizeof(*dif_storep);
3839
3840 sg_miter_start(&miter, scsi_prot_sglist(scp),
3841 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3842 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3843
3844 while (sg_miter_next(&miter) && resid > 0) {
3845 size_t len = min_t(size_t, miter.length, resid);
3846 void *start = dif_store(sip, sector);
3847 size_t rest = 0;
3848
3849 if (dif_store_end < start + len)
3850 rest = start + len - dif_store_end;
3851
3852 paddr = miter.addr;
3853
3854 if (read)
3855 memcpy(paddr, start, len - rest);
3856 else
3857 memcpy(start, paddr, len - rest);
3858
3859 if (rest) {
3860 if (read)
3861 memcpy(paddr + len - rest, dif_storep, rest);
3862 else
3863 memcpy(dif_storep, paddr + len - rest, rest);
3864 }
3865
3866 sector += len / sizeof(*dif_storep);
3867 resid -= len;
3868 }
3869 sg_miter_stop(&miter);
3870 }
3871
3872 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3873 unsigned int sectors, u32 ei_lba)
3874 {
3875 int ret = 0;
3876 unsigned int i;
3877 sector_t sector;
3878 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3879 scp->device->hostdata, true);
3880 struct t10_pi_tuple *sdt;
3881
3882 for (i = 0; i < sectors; i++, ei_lba++) {
3883 sector = start_sec + i;
3884 sdt = dif_store(sip, sector);
3885
3886 if (sdt->app_tag == cpu_to_be16(0xffff))
3887 continue;
3888
3889 /*
3890 * Because scsi_debug acts as both initiator and
3891 * target we proceed to verify the PI even if
3892 * RDPROTECT=3. This is done so the "initiator" knows
3893 * which type of error to return. Otherwise we would
3894 * have to iterate over the PI twice.
3895 */
3896 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3897 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3898 sector, ei_lba);
3899 if (ret) {
3900 dif_errors++;
3901 break;
3902 }
3903 }
3904 }
3905
3906 dif_copy_prot(scp, start_sec, sectors, true);
3907 dix_reads++;
3908
3909 return ret;
3910 }
3911
3912 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3913 {
3914 bool check_prot;
3915 u32 num;
3916 u32 ei_lba;
3917 int ret;
3918 u64 lba;
3919 struct sdeb_store_info *sip = devip2sip(devip, true);
3920 u8 *cmd = scp->cmnd;
3921 bool meta_data_locked = false;
3922
3923 switch (cmd[0]) {
3924 case READ_16:
3925 ei_lba = 0;
3926 lba = get_unaligned_be64(cmd + 2);
3927 num = get_unaligned_be32(cmd + 10);
3928 check_prot = true;
3929 break;
3930 case READ_10:
3931 ei_lba = 0;
3932 lba = get_unaligned_be32(cmd + 2);
3933 num = get_unaligned_be16(cmd + 7);
3934 check_prot = true;
3935 break;
3936 case READ_6:
3937 ei_lba = 0;
3938 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3939 (u32)(cmd[1] & 0x1f) << 16;
3940 num = (0 == cmd[4]) ? 256 : cmd[4];
3941 check_prot = true;
3942 break;
3943 case READ_12:
3944 ei_lba = 0;
3945 lba = get_unaligned_be32(cmd + 2);
3946 num = get_unaligned_be32(cmd + 6);
3947 check_prot = true;
3948 break;
3949 case XDWRITEREAD_10:
3950 ei_lba = 0;
3951 lba = get_unaligned_be32(cmd + 2);
3952 num = get_unaligned_be16(cmd + 7);
3953 check_prot = false;
3954 break;
3955 default: /* assume READ(32) */
3956 lba = get_unaligned_be64(cmd + 12);
3957 ei_lba = get_unaligned_be32(cmd + 20);
3958 num = get_unaligned_be32(cmd + 28);
3959 check_prot = false;
3960 break;
3961 }
3962 if (unlikely(have_dif_prot && check_prot)) {
3963 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3964 (cmd[1] & 0xe0)) {
3965 mk_sense_invalid_opcode(scp);
3966 return check_condition_result;
3967 }
3968 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3969 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3970 (cmd[1] & 0xe0) == 0)
3971 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3972 "to DIF device\n");
3973 }
3974 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3975 atomic_read(&sdeb_inject_pending))) {
3976 num /= 2;
3977 atomic_set(&sdeb_inject_pending, 0);
3978 }
3979
3980 /*
3981 * When checking device access params, for reads we only check data
3982 * versus what is set at init time, so no need to lock.
3983 */
3984 ret = check_device_access_params(scp, lba, num, false);
3985 if (ret)
3986 return ret;
3987 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3988 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3989 ((lba + num) > sdebug_medium_error_start))) {
3990 /* claim unrecoverable read error */
3991 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3992 /* set info field and valid bit for fixed descriptor */
3993 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3994 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3995 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3996 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3997 put_unaligned_be32(ret, scp->sense_buffer + 3);
3998 }
3999 scsi_set_resid(scp, scsi_bufflen(scp));
4000 return check_condition_result;
4001 }
4002
4003 if (sdebug_dev_is_zoned(devip) ||
4004 (sdebug_dix && scsi_prot_sg_count(scp))) {
4005 sdeb_meta_read_lock(sip);
4006 meta_data_locked = true;
4007 }
4008
4009 /* DIX + T10 DIF */
4010 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4011 switch (prot_verify_read(scp, lba, num, ei_lba)) {
4012 case 1: /* Guard tag error */
4013 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4014 sdeb_meta_read_unlock(sip);
4015 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4016 return check_condition_result;
4017 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4018 sdeb_meta_read_unlock(sip);
4019 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4020 return illegal_condition_result;
4021 }
4022 break;
4023 case 3: /* Reference tag error */
4024 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4025 sdeb_meta_read_unlock(sip);
4026 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4027 return check_condition_result;
4028 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4029 sdeb_meta_read_unlock(sip);
4030 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4031 return illegal_condition_result;
4032 }
4033 break;
4034 }
4035 }
4036
4037 ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4038 if (meta_data_locked)
4039 sdeb_meta_read_unlock(sip);
4040 if (unlikely(ret == -1))
4041 return DID_ERROR << 16;
4042
4043 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4044
4045 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4046 atomic_read(&sdeb_inject_pending))) {
4047 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4048 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4049 atomic_set(&sdeb_inject_pending, 0);
4050 return check_condition_result;
4051 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4052 /* Logical block guard check failed */
4053 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4054 atomic_set(&sdeb_inject_pending, 0);
4055 return illegal_condition_result;
4056 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4057 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4058 atomic_set(&sdeb_inject_pending, 0);
4059 return illegal_condition_result;
4060 }
4061 }
4062 return 0;
4063 }
4064
4065 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4066 unsigned int sectors, u32 ei_lba)
4067 {
4068 int ret;
4069 struct t10_pi_tuple *sdt;
4070 void *daddr;
4071 sector_t sector = start_sec;
4072 int ppage_offset;
4073 int dpage_offset;
4074 struct sg_mapping_iter diter;
4075 struct sg_mapping_iter piter;
4076
4077 BUG_ON(scsi_sg_count(SCpnt) == 0);
4078 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4079
4080 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4081 scsi_prot_sg_count(SCpnt),
4082 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4083 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4084 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4085
4086 /* For each protection page */
4087 while (sg_miter_next(&piter)) {
4088 dpage_offset = 0;
4089 if (WARN_ON(!sg_miter_next(&diter))) {
4090 ret = 0x01;
4091 goto out;
4092 }
4093
4094 for (ppage_offset = 0; ppage_offset < piter.length;
4095 ppage_offset += sizeof(struct t10_pi_tuple)) {
4096 /* If we're at the end of the current
4097 * data page advance to the next one
4098 */
4099 if (dpage_offset >= diter.length) {
4100 if (WARN_ON(!sg_miter_next(&diter))) {
4101 ret = 0x01;
4102 goto out;
4103 }
4104 dpage_offset = 0;
4105 }
4106
4107 sdt = piter.addr + ppage_offset;
4108 daddr = diter.addr + dpage_offset;
4109
4110 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4111 ret = dif_verify(sdt, daddr, sector, ei_lba);
4112 if (ret)
4113 goto out;
4114 }
4115
4116 sector++;
4117 ei_lba++;
4118 dpage_offset += sdebug_sector_size;
4119 }
4120 diter.consumed = dpage_offset;
4121 sg_miter_stop(&diter);
4122 }
4123 sg_miter_stop(&piter);
4124
4125 dif_copy_prot(SCpnt, start_sec, sectors, false);
4126 dix_writes++;
4127
4128 return 0;
4129
4130 out:
4131 dif_errors++;
4132 sg_miter_stop(&diter);
4133 sg_miter_stop(&piter);
4134 return ret;
4135 }
4136
4137 static unsigned long lba_to_map_index(sector_t lba)
4138 {
4139 if (sdebug_unmap_alignment)
4140 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4141 sector_div(lba, sdebug_unmap_granularity);
4142 return lba;
4143 }
4144
4145 static sector_t map_index_to_lba(unsigned long index)
4146 {
4147 sector_t lba = index * sdebug_unmap_granularity;
4148
4149 if (sdebug_unmap_alignment)
4150 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4151 return lba;
4152 }
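/*
 * Worked example (hypothetical parameters): with
 * sdebug_unmap_granularity == 8 and sdebug_unmap_alignment == 4,
 * lba_to_map_index() maps LBAs 0..3 to index 0 and LBAs 4..11 to
 * index 1, and map_index_to_lba(1) returns 4, the first LBA of that
 * granule.
 */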
4153
4154 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4155 unsigned int *num)
4156 {
4157 sector_t end;
4158 unsigned int mapped;
4159 unsigned long index;
4160 unsigned long next;
4161
4162 index = lba_to_map_index(lba);
4163 mapped = test_bit(index, sip->map_storep);
4164
4165 if (mapped)
4166 next = find_next_zero_bit(sip->map_storep, map_size, index);
4167 else
4168 next = find_next_bit(sip->map_storep, map_size, index);
4169
4170 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
4171 *num = end - lba;
4172 return mapped;
4173 }
4174
4175 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4176 unsigned int len)
4177 {
4178 sector_t end = lba + len;
4179
4180 while (lba < end) {
4181 unsigned long index = lba_to_map_index(lba);
4182
4183 if (index < map_size)
4184 set_bit(index, sip->map_storep);
4185
4186 lba = map_index_to_lba(index + 1);
4187 }
4188 }
4189
4190 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4191 unsigned int len)
4192 {
4193 sector_t end = lba + len;
4194 u8 *fsp = sip->storep;
4195
4196 while (lba < end) {
4197 unsigned long index = lba_to_map_index(lba);
4198
4199 if (lba == map_index_to_lba(index) &&
4200 lba + sdebug_unmap_granularity <= end &&
4201 index < map_size) {
4202 clear_bit(index, sip->map_storep);
4203 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
4204 memset(fsp + lba * sdebug_sector_size,
4205 (sdebug_lbprz & 1) ? 0 : 0xff,
4206 sdebug_sector_size *
4207 sdebug_unmap_granularity);
4208 }
4209 if (sip->dif_storep) {
4210 memset(sip->dif_storep + lba, 0xff,
4211 sizeof(*sip->dif_storep) *
4212 sdebug_unmap_granularity);
4213 }
4214 }
4215 lba = map_index_to_lba(index + 1);
4216 }
4217 }
4218
4219 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4220 {
4221 bool check_prot;
4222 u32 num;
4223 u8 group = 0;
4224 u32 ei_lba;
4225 int ret;
4226 u64 lba;
4227 struct sdeb_store_info *sip = devip2sip(devip, true);
4228 u8 *cmd = scp->cmnd;
4229 bool meta_data_locked = false;
4230
4231 switch (cmd[0]) {
4232 case WRITE_16:
4233 ei_lba = 0;
4234 lba = get_unaligned_be64(cmd + 2);
4235 num = get_unaligned_be32(cmd + 10);
4236 group = cmd[14] & 0x3f;
4237 check_prot = true;
4238 break;
4239 case WRITE_10:
4240 ei_lba = 0;
4241 lba = get_unaligned_be32(cmd + 2);
4242 group = cmd[6] & 0x3f;
4243 num = get_unaligned_be16(cmd + 7);
4244 check_prot = true;
4245 break;
4246 case WRITE_6:
4247 ei_lba = 0;
4248 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4249 (u32)(cmd[1] & 0x1f) << 16;
4250 num = (0 == cmd[4]) ? 256 : cmd[4];
4251 check_prot = true;
4252 break;
4253 case WRITE_12:
4254 ei_lba = 0;
4255 lba = get_unaligned_be32(cmd + 2);
4256 num = get_unaligned_be32(cmd + 6);
4257 group = cmd[10] & 0x3f;
4258 check_prot = true;
4259 break;
4260 case 0x53: /* XDWRITEREAD(10) */
4261 ei_lba = 0;
4262 lba = get_unaligned_be32(cmd + 2);
4263 group = cmd[6] & 0x1f;
4264 num = get_unaligned_be16(cmd + 7);
4265 check_prot = false;
4266 break;
4267 default: /* assume WRITE(32) */
4268 group = cmd[6] & 0x3f;
4269 lba = get_unaligned_be64(cmd + 12);
4270 ei_lba = get_unaligned_be32(cmd + 20);
4271 num = get_unaligned_be32(cmd + 28);
4272 check_prot = false;
4273 break;
4274 }
4275 if (unlikely(have_dif_prot && check_prot)) {
4276 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4277 (cmd[1] & 0xe0)) {
4278 mk_sense_invalid_opcode(scp);
4279 return check_condition_result;
4280 }
4281 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4282 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4283 (cmd[1] & 0xe0) == 0)
4284 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4285 "to DIF device\n");
4286 }
4287
4288 if (sdebug_dev_is_zoned(devip) ||
4289 (sdebug_dix && scsi_prot_sg_count(scp)) ||
4290 scsi_debug_lbp()) {
4291 sdeb_meta_write_lock(sip);
4292 meta_data_locked = true;
4293 }
4294
4295 ret = check_device_access_params(scp, lba, num, true);
4296 if (ret) {
4297 if (meta_data_locked)
4298 sdeb_meta_write_unlock(sip);
4299 return ret;
4300 }
4301
4302 /* DIX + T10 DIF */
4303 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4304 switch (prot_verify_write(scp, lba, num, ei_lba)) {
4305 case 1: /* Guard tag error */
4306 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4307 sdeb_meta_write_unlock(sip);
4308 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4309 return illegal_condition_result;
4310 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4311 sdeb_meta_write_unlock(sip);
4312 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4313 return check_condition_result;
4314 }
4315 break;
4316 case 3: /* Reference tag error */
4317 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4318 sdeb_meta_write_unlock(sip);
4319 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4320 return illegal_condition_result;
4321 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4322 sdeb_meta_write_unlock(sip);
4323 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4324 return check_condition_result;
4325 }
4326 break;
4327 }
4328 }
4329
4330 ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
4331 if (unlikely(scsi_debug_lbp()))
4332 map_region(sip, lba, num);
4333
4334 /* If ZBC zone then bump its write pointer */
4335 if (sdebug_dev_is_zoned(devip))
4336 zbc_inc_wp(devip, lba, num);
4337 if (meta_data_locked)
4338 sdeb_meta_write_unlock(sip);
4339
4340 if (unlikely(-1 == ret))
4341 return DID_ERROR << 16;
4342 else if (unlikely(sdebug_verbose &&
4343 (ret < (num * sdebug_sector_size))))
4344 sdev_printk(KERN_INFO, scp->device,
4345 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4346 my_name, num * sdebug_sector_size, ret);
4347
4348 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4349 atomic_read(&sdeb_inject_pending))) {
4350 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4351 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4352 atomic_set(&sdeb_inject_pending, 0);
4353 return check_condition_result;
4354 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4355 /* Logical block guard check failed */
4356 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4357 atomic_set(&sdeb_inject_pending, 0);
4358 return illegal_condition_result;
4359 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4360 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4361 atomic_set(&sdeb_inject_pending, 0);
4362 return illegal_condition_result;
4363 }
4364 }
4365 return 0;
4366 }
4367
4368 /*
4369 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4370 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4371 */
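/*
 * Data-out layout sketch, as parsed below: the first lbdof logical
 * blocks carry the parameter list header plus 32-byte LBA range
 * descriptors. In each descriptor, bytes 0..7 hold the LBA (be64),
 * bytes 8..11 the number of blocks (be32) and, for the 32-byte cdb
 * variant only, bytes 12..15 the expected initial reference tag. The
 * data to be written starts at byte offset lbdof * block size.
 */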
4372 static int resp_write_scat(struct scsi_cmnd *scp,
4373 struct sdebug_dev_info *devip)
4374 {
4375 u8 *cmd = scp->cmnd;
4376 u8 *lrdp = NULL;
4377 u8 *up;
4378 struct sdeb_store_info *sip = devip2sip(devip, true);
4379 u8 wrprotect;
4380 u16 lbdof, num_lrd, k;
4381 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4382 u32 lb_size = sdebug_sector_size;
4383 u32 ei_lba;
4384 u64 lba;
4385 u8 group;
4386 int ret, res;
4387 bool is_16;
4388 static const u32 lrd_size = 32; /* + parameter list header size */
4389
4390 if (cmd[0] == VARIABLE_LENGTH_CMD) {
4391 is_16 = false;
4392 group = cmd[6] & 0x3f;
4393 wrprotect = (cmd[10] >> 5) & 0x7;
4394 lbdof = get_unaligned_be16(cmd + 12);
4395 num_lrd = get_unaligned_be16(cmd + 16);
4396 bt_len = get_unaligned_be32(cmd + 28);
4397 } else { /* that leaves WRITE SCATTERED(16) */
4398 is_16 = true;
4399 wrprotect = (cmd[2] >> 5) & 0x7;
4400 lbdof = get_unaligned_be16(cmd + 4);
4401 num_lrd = get_unaligned_be16(cmd + 8);
4402 bt_len = get_unaligned_be32(cmd + 10);
4403 group = cmd[14] & 0x3f;
4404 if (unlikely(have_dif_prot)) {
4405 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4406 wrprotect) {
4407 mk_sense_invalid_opcode(scp);
4408 return illegal_condition_result;
4409 }
4410 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4411 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4412 wrprotect == 0)
4413 sdev_printk(KERN_ERR, scp->device,
4414 "Unprotected WR to DIF device\n");
4415 }
4416 }
4417 if ((num_lrd == 0) || (bt_len == 0))
4418 return 0; /* T10 says these do-nothings are not errors */
4419 if (lbdof == 0) {
4420 if (sdebug_verbose)
4421 sdev_printk(KERN_INFO, scp->device,
4422 "%s: %s: LB Data Offset field bad\n",
4423 my_name, __func__);
4424 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4425 return illegal_condition_result;
4426 }
4427 lbdof_blen = lbdof * lb_size;
4428 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4429 if (sdebug_verbose)
4430 sdev_printk(KERN_INFO, scp->device,
4431 "%s: %s: LBA range descriptors don't fit\n",
4432 my_name, __func__);
4433 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4434 return illegal_condition_result;
4435 }
4436 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4437 if (lrdp == NULL)
4438 return SCSI_MLQUEUE_HOST_BUSY;
4439 if (sdebug_verbose)
4440 sdev_printk(KERN_INFO, scp->device,
4441 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4442 my_name, __func__, lbdof_blen);
4443 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4444 if (res == -1) {
4445 ret = DID_ERROR << 16;
4446 goto err_out;
4447 }
4448
4449 /* Just keep it simple and always lock for now */
4450 sdeb_meta_write_lock(sip);
4451 sg_off = lbdof_blen;
4452 /* Spec says Buffer xfer Length field in number of LBs in dout */
4453 cum_lb = 0;
4454 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4455 lba = get_unaligned_be64(up + 0);
4456 num = get_unaligned_be32(up + 8);
4457 if (sdebug_verbose)
4458 sdev_printk(KERN_INFO, scp->device,
4459 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4460 my_name, __func__, k, lba, num, sg_off);
4461 if (num == 0)
4462 continue;
4463 ret = check_device_access_params(scp, lba, num, true);
4464 if (ret)
4465 goto err_out_unlock;
4466 num_by = num * lb_size;
4467 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4468
4469 if ((cum_lb + num) > bt_len) {
4470 if (sdebug_verbose)
4471 sdev_printk(KERN_INFO, scp->device,
4472 "%s: %s: sum of blocks > data provided\n",
4473 my_name, __func__);
4474 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4475 0);
4476 ret = illegal_condition_result;
4477 goto err_out_unlock;
4478 }
4479
4480 /* DIX + T10 DIF */
4481 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4482 int prot_ret = prot_verify_write(scp, lba, num,
4483 ei_lba);
4484
4485 if (prot_ret) {
4486 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4487 prot_ret);
4488 ret = illegal_condition_result;
4489 goto err_out_unlock;
4490 }
4491 }
4492
4493 /*
4494 * Write each range atomically, to keep behaviour as close as
4495 * possible to that of this driver before atomic writes were added.
4496 */
4497 ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
4498 /* If ZBC zone then bump its write pointer */
4499 if (sdebug_dev_is_zoned(devip))
4500 zbc_inc_wp(devip, lba, num);
4501 if (unlikely(scsi_debug_lbp()))
4502 map_region(sip, lba, num);
4503 if (unlikely(-1 == ret)) {
4504 ret = DID_ERROR << 16;
4505 goto err_out_unlock;
4506 } else if (unlikely(sdebug_verbose && (ret < num_by)))
4507 sdev_printk(KERN_INFO, scp->device,
4508 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4509 my_name, num_by, ret);
4510
4511 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4512 atomic_read(&sdeb_inject_pending))) {
4513 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4514 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4515 atomic_set(&sdeb_inject_pending, 0);
4516 ret = check_condition_result;
4517 goto err_out_unlock;
4518 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4519 /* Logical block guard check failed */
4520 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4521 atomic_set(&sdeb_inject_pending, 0);
4522 ret = illegal_condition_result;
4523 goto err_out_unlock;
4524 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4525 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4526 atomic_set(&sdeb_inject_pending, 0);
4527 ret = illegal_condition_result;
4528 goto err_out_unlock;
4529 }
4530 }
4531 sg_off += num_by;
4532 cum_lb += num;
4533 }
4534 ret = 0;
4535 err_out_unlock:
4536 sdeb_meta_write_unlock(sip);
4537 err_out:
4538 kfree(lrdp);
4539 return ret;
4540 }
4541
4542 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4543 u32 ei_lba, bool unmap, bool ndob)
4544 {
4545 struct scsi_device *sdp = scp->device;
4546 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4547 unsigned long long i;
4548 u64 block, lbaa;
4549 u32 lb_size = sdebug_sector_size;
4550 int ret;
4551 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4552 scp->device->hostdata, true);
4553 u8 *fs1p;
4554 u8 *fsp;
4555 bool meta_data_locked = false;
4556
4557 if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
4558 sdeb_meta_write_lock(sip);
4559 meta_data_locked = true;
4560 }
4561
4562 ret = check_device_access_params(scp, lba, num, true);
4563 if (ret)
4564 goto out;
4565
4566 if (unmap && scsi_debug_lbp()) {
4567 unmap_region(sip, lba, num);
4568 goto out;
4569 }
4570 lbaa = lba;
4571 block = do_div(lbaa, sdebug_store_sectors);
4572 /* if ndob then zero 1 logical block, else fetch 1 logical block */
4573 fsp = sip->storep;
4574 fs1p = fsp + (block * lb_size);
4575 sdeb_data_write_lock(sip);
4576 if (ndob) {
4577 memset(fs1p, 0, lb_size);
4578 ret = 0;
4579 } else
4580 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4581
4582 if (-1 == ret) {
4583 ret = DID_ERROR << 16;
4584 goto out;
4585 } else if (sdebug_verbose && !ndob && (ret < lb_size))
4586 sdev_printk(KERN_INFO, scp->device,
4587 "%s: %s: lb size=%u, IO sent=%d bytes\n",
4588 my_name, "write same", lb_size, ret);
4589
4590 /* Copy first sector to remaining blocks */
4591 for (i = 1 ; i < num ; i++) {
4592 lbaa = lba + i;
4593 block = do_div(lbaa, sdebug_store_sectors);
4594 memmove(fsp + (block * lb_size), fs1p, lb_size);
4595 }
4596 if (scsi_debug_lbp())
4597 map_region(sip, lba, num);
4598 /* If ZBC zone then bump its write pointer */
4599 if (sdebug_dev_is_zoned(devip))
4600 zbc_inc_wp(devip, lba, num);
4601 sdeb_data_write_unlock(sip);
4602 ret = 0;
4603 out:
4604 if (meta_data_locked)
4605 sdeb_meta_write_unlock(sip);
4606 return ret;
4607 }
4608
4609 static int resp_write_same_10(struct scsi_cmnd *scp,
4610 struct sdebug_dev_info *devip)
4611 {
4612 u8 *cmd = scp->cmnd;
4613 u32 lba;
4614 u16 num;
4615 u32 ei_lba = 0;
4616 bool unmap = false;
4617
4618 if (cmd[1] & 0x8) {
4619 if (sdebug_lbpws10 == 0) {
4620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4621 return check_condition_result;
4622 } else
4623 unmap = true;
4624 }
4625 lba = get_unaligned_be32(cmd + 2);
4626 num = get_unaligned_be16(cmd + 7);
4627 if (num > sdebug_write_same_length) {
4628 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4629 return check_condition_result;
4630 }
4631 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4632 }
4633
4634 static int resp_write_same_16(struct scsi_cmnd *scp,
4635 struct sdebug_dev_info *devip)
4636 {
4637 u8 *cmd = scp->cmnd;
4638 u64 lba;
4639 u32 num;
4640 u32 ei_lba = 0;
4641 bool unmap = false;
4642 bool ndob = false;
4643
4644 if (cmd[1] & 0x8) { /* UNMAP */
4645 if (sdebug_lbpws == 0) {
4646 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4647 return check_condition_result;
4648 } else
4649 unmap = true;
4650 }
4651 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
4652 ndob = true;
4653 lba = get_unaligned_be64(cmd + 2);
4654 num = get_unaligned_be32(cmd + 10);
4655 if (num > sdebug_write_same_length) {
4656 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4657 return check_condition_result;
4658 }
4659 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4660 }
4661
4662 /* Note the mode field is in the same position as the (lower) service action
4663 * field. For the Report supported operation codes command, SPC-4 suggests
4664 * each mode of this command should be reported separately; that is left for the future. */
4665 static int resp_write_buffer(struct scsi_cmnd *scp,
4666 struct sdebug_dev_info *devip)
4667 {
4668 u8 *cmd = scp->cmnd;
4669 struct scsi_device *sdp = scp->device;
4670 struct sdebug_dev_info *dp;
4671 u8 mode;
4672
4673 mode = cmd[1] & 0x1f;
4674 switch (mode) {
4675 case 0x4: /* download microcode (MC) and activate (ACT) */
4676 /* set UAs on this device only */
4677 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4678 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4679 break;
4680 case 0x5: /* download MC, save and ACT */
4681 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4682 break;
4683 case 0x6: /* download MC with offsets and ACT */
4684 /* set UAs on most devices (LUs) in this target */
4685 list_for_each_entry(dp,
4686 &devip->sdbg_host->dev_info_list,
4687 dev_list)
4688 if (dp->target == sdp->id) {
4689 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4690 if (devip != dp)
4691 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4692 dp->uas_bm);
4693 }
4694 break;
4695 case 0x7: /* download MC with offsets, save, and ACT */
4696 /* set UA on all devices (LUs) in this target */
4697 list_for_each_entry(dp,
4698 &devip->sdbg_host->dev_info_list,
4699 dev_list)
4700 if (dp->target == sdp->id)
4701 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4702 dp->uas_bm);
4703 break;
4704 default:
4705 /* do nothing for this command for other mode values */
4706 break;
4707 }
4708 return 0;
4709 }
4710
4711 static int resp_comp_write(struct scsi_cmnd *scp,
4712 struct sdebug_dev_info *devip)
4713 {
4714 u8 *cmd = scp->cmnd;
4715 u8 *arr;
4716 struct sdeb_store_info *sip = devip2sip(devip, true);
4717 u64 lba;
4718 u32 dnum;
4719 u32 lb_size = sdebug_sector_size;
4720 u8 num;
4721 int ret;
4722 int retval = 0;
4723
4724 lba = get_unaligned_be64(cmd + 2);
4725 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4726 if (0 == num)
4727 return 0; /* degenerate case, not an error */
4728 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4729 (cmd[1] & 0xe0)) {
4730 mk_sense_invalid_opcode(scp);
4731 return check_condition_result;
4732 }
4733 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4734 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4735 (cmd[1] & 0xe0) == 0)
4736 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4737 "to DIF device\n");
4738 ret = check_device_access_params(scp, lba, num, false);
4739 if (ret)
4740 return ret;
4741 dnum = 2 * num;
4742 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4743 if (NULL == arr) {
4744 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4745 INSUFF_RES_ASCQ);
4746 return check_condition_result;
4747 }
4748
4749 ret = do_dout_fetch(scp, dnum, arr);
4750 if (ret == -1) {
4751 retval = DID_ERROR << 16;
4752 goto cleanup_free;
4753 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4754 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4755 "indicated=%u, IO sent=%d bytes\n", my_name,
4756 dnum * lb_size, ret);
4757
4758 sdeb_data_write_lock(sip);
4759 sdeb_meta_write_lock(sip);
4760 if (!comp_write_worker(sip, lba, num, arr, false)) {
4761 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4762 retval = check_condition_result;
4763 goto cleanup_unlock;
4764 }
4765
4766 /* Cover sip->map_storep (which map_region() sets) with the data lock */
4767 if (scsi_debug_lbp())
4768 map_region(sip, lba, num);
4769 cleanup_unlock:
4770 sdeb_meta_write_unlock(sip);
4771 sdeb_data_write_unlock(sip);
4772 cleanup_free:
4773 kfree(arr);
4774 return retval;
4775 }
4776
4777 struct unmap_block_desc {
4778 __be64 lba;
4779 __be32 blocks;
4780 __be32 __reserved;
4781 };
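/*
 * UNMAP parameter list sketch, matching the sanity checks below:
 * bytes 0..1 hold the data length (payload_len - 2), bytes 2..3 the
 * block descriptor data length (16 * descriptors), and the 16-byte
 * descriptors above start at byte offset 8.
 */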
4782
4783 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4784 {
4785 unsigned char *buf;
4786 struct unmap_block_desc *desc;
4787 struct sdeb_store_info *sip = devip2sip(devip, true);
4788 unsigned int i, payload_len, descriptors;
4789 int ret;
4790
4791 if (!scsi_debug_lbp())
4792 return 0; /* fib and say it's done */
4793 payload_len = get_unaligned_be16(scp->cmnd + 7);
4794 BUG_ON(scsi_bufflen(scp) != payload_len);
4795
4796 descriptors = (payload_len - 8) / 16;
4797 if (descriptors > sdebug_unmap_max_desc) {
4798 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4799 return check_condition_result;
4800 }
4801
4802 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4803 if (!buf) {
4804 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4805 INSUFF_RES_ASCQ);
4806 return check_condition_result;
4807 }
4808
4809 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4810
4811 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4812 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4813
4814 desc = (void *)&buf[8];
4815
4816 sdeb_meta_write_lock(sip);
4817
4818 for (i = 0 ; i < descriptors ; i++) {
4819 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4820 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4821
4822 ret = check_device_access_params(scp, lba, num, true);
4823 if (ret)
4824 goto out;
4825
4826 unmap_region(sip, lba, num);
4827 }
4828
4829 ret = 0;
4830
4831 out:
4832 sdeb_meta_write_unlock(sip);
4833 kfree(buf);
4834
4835 return ret;
4836 }
4837
4838 #define SDEBUG_GET_LBA_STATUS_LEN 32
4839
4840 static int resp_get_lba_status(struct scsi_cmnd *scp,
4841 struct sdebug_dev_info *devip)
4842 {
4843 u8 *cmd = scp->cmnd;
4844 u64 lba;
4845 u32 alloc_len, mapped, num;
4846 int ret;
4847 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4848
4849 lba = get_unaligned_be64(cmd + 2);
4850 alloc_len = get_unaligned_be32(cmd + 10);
4851
4852 if (alloc_len < 24)
4853 return 0;
4854
4855 ret = check_device_access_params(scp, lba, 1, false);
4856 if (ret)
4857 return ret;
4858
4859 if (scsi_debug_lbp()) {
4860 struct sdeb_store_info *sip = devip2sip(devip, true);
4861
4862 mapped = map_state(sip, lba, &num);
4863 } else {
4864 mapped = 1;
4865 /* following just in case virtual_gb changed */
4866 sdebug_capacity = get_sdebug_capacity();
4867 if (sdebug_capacity - lba <= 0xffffffff)
4868 num = sdebug_capacity - lba;
4869 else
4870 num = 0xffffffff;
4871 }
4872
4873 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4874 put_unaligned_be32(20, arr); /* Parameter Data Length */
4875 put_unaligned_be64(lba, arr + 8); /* LBA */
4876 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4877 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4878
4879 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4880 }
4881
4882 static int resp_get_stream_status(struct scsi_cmnd *scp,
4883 struct sdebug_dev_info *devip)
4884 {
4885 u16 starting_stream_id, stream_id;
4886 const u8 *cmd = scp->cmnd;
4887 u32 alloc_len, offset;
4888 u8 arr[256] = {};
4889 struct scsi_stream_status_header *h = (void *)arr;
4890
4891 starting_stream_id = get_unaligned_be16(cmd + 4);
4892 alloc_len = get_unaligned_be32(cmd + 10);
4893
4894 if (alloc_len < 8) {
4895 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4896 return check_condition_result;
4897 }
4898
4899 if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4900 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4901 return check_condition_result;
4902 }
4903
4904 /*
4905 * The GET STREAM STATUS command only reports status information
4906 * about open streams. Treat the non-permanent stream as open.
4907 */
4908 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4909 &h->number_of_open_streams);
4910
4911 for (offset = 8, stream_id = starting_stream_id;
4912 offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4913 stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4914 offset += 8, stream_id++) {
4915 struct scsi_stream_status *stream_status = (void *)arr + offset;
4916
4917 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4918 put_unaligned_be16(stream_id,
4919 &stream_status->stream_identifier);
4920 stream_status->rel_lifetime = stream_id + 1;
4921 }
4922 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4923
4924 return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4925 }
4926
4927 static int resp_sync_cache(struct scsi_cmnd *scp,
4928 struct sdebug_dev_info *devip)
4929 {
4930 int res = 0;
4931 u64 lba;
4932 u32 num_blocks;
4933 u8 *cmd = scp->cmnd;
4934
4935 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4936 lba = get_unaligned_be32(cmd + 2);
4937 num_blocks = get_unaligned_be16(cmd + 7);
4938 } else { /* SYNCHRONIZE_CACHE(16) */
4939 lba = get_unaligned_be64(cmd + 2);
4940 num_blocks = get_unaligned_be32(cmd + 10);
4941 }
4942 if (lba + num_blocks > sdebug_capacity) {
4943 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4944 return check_condition_result;
4945 }
4946 if (!write_since_sync || (cmd[1] & 0x2))
4947 res = SDEG_RES_IMMED_MASK;
4948 else /* delay if write_since_sync and IMMED clear */
4949 write_since_sync = false;
4950 return res;
4951 }
4952
4953 /*
4954 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4955 * CONDITION MET if the specified blocks fit (or will fit) in the cache, and
4956 * a GOOD status otherwise. Model a disk with a big cache and yield
4957 * CONDITION MET. Actually tries to bring the range in main memory into the
4958 * cache associated with the CPU(s).
4959 */
4960 static int resp_pre_fetch(struct scsi_cmnd *scp,
4961 struct sdebug_dev_info *devip)
4962 {
4963 int res = 0;
4964 u64 lba;
4965 u64 block, rest = 0;
4966 u32 nblks;
4967 u8 *cmd = scp->cmnd;
4968 struct sdeb_store_info *sip = devip2sip(devip, true);
4969 u8 *fsp = sip->storep;
4970
4971 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4972 lba = get_unaligned_be32(cmd + 2);
4973 nblks = get_unaligned_be16(cmd + 7);
4974 } else { /* PRE-FETCH(16) */
4975 lba = get_unaligned_be64(cmd + 2);
4976 nblks = get_unaligned_be32(cmd + 10);
4977 }
4978 if (lba + nblks > sdebug_capacity) {
4979 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4980 return check_condition_result;
4981 }
4982 if (!fsp)
4983 goto fini;
4984 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4985 block = do_div(lba, sdebug_store_sectors);
4986 if (block + nblks > sdebug_store_sectors)
4987 rest = block + nblks - sdebug_store_sectors;
4988
4989 /* Try to bring the PRE-FETCH range into CPU's cache */
4990 sdeb_data_read_lock(sip);
4991 prefetch_range(fsp + (sdebug_sector_size * block),
4992 (nblks - rest) * sdebug_sector_size);
4993 if (rest)
4994 prefetch_range(fsp, rest * sdebug_sector_size);
4995
4996 sdeb_data_read_unlock(sip);
4997 fini:
4998 if (cmd[1] & 0x2)
4999 res = SDEG_RES_IMMED_MASK;
5000 return res | condition_met_result;
5001 }
5002
5003 #define RL_BUCKET_ELEMS 8
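/*
 * REPORT LUNS responses are built and shipped in "buckets" of up to
 * RL_BUCKET_ELEMS 8-byte LUN entries so the on-stack arr[] stays small;
 * the 8-byte response header occupies one bucket slot in the first pass.
 */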
5004
5005 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5006 * (W-LUN), the normal Linux scanning logic does not associate it with a
5007 * device (e.g. /dev/sg7). The following magic will make that association:
5008 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5009 * where <n> is a host number. If there are multiple targets in a host then
5010 * the above will associate a W-LUN to each target. To only get a W-LUN
5011 * for target 2, then use "echo '- 2 49409' > scan" .
5012 */
5013 static int resp_report_luns(struct scsi_cmnd *scp,
5014 struct sdebug_dev_info *devip)
5015 {
5016 unsigned char *cmd = scp->cmnd;
5017 unsigned int alloc_len;
5018 unsigned char select_report;
5019 u64 lun;
5020 struct scsi_lun *lun_p;
5021 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
5022 unsigned int lun_cnt; /* normal LUN count (max: 256) */
5023 unsigned int wlun_cnt; /* report luns W-LUN count */
5024 unsigned int tlun_cnt; /* total LUN count */
5025 unsigned int rlen; /* response length (in bytes) */
5026 int k, j, n, res;
5027 unsigned int off_rsp = 0;
5028 const int sz_lun = sizeof(struct scsi_lun);
5029
5030 clear_luns_changed_on_target(devip);
5031
5032 select_report = cmd[2];
5033 alloc_len = get_unaligned_be32(cmd + 6);
5034
5035 if (alloc_len < 4) {
5036 pr_err("alloc len too small %d\n", alloc_len);
5037 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5038 return check_condition_result;
5039 }
5040
5041 switch (select_report) {
5042 case 0: /* all LUNs apart from W-LUNs */
5043 lun_cnt = sdebug_max_luns;
5044 wlun_cnt = 0;
5045 break;
5046 case 1: /* only W-LUNs */
5047 lun_cnt = 0;
5048 wlun_cnt = 1;
5049 break;
5050 case 2: /* all LUNs */
5051 lun_cnt = sdebug_max_luns;
5052 wlun_cnt = 1;
5053 break;
5054 case 0x10: /* only administrative LUs */
5055 case 0x11: /* see SPC-5 */
5056 case 0x12: /* only subsidiary LUs owned by referenced LU */
5057 default:
5058 pr_debug("select report invalid %d\n", select_report);
5059 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5060 return check_condition_result;
5061 }
5062
5063 if (sdebug_no_lun_0 && (lun_cnt > 0))
5064 --lun_cnt;
5065
5066 tlun_cnt = lun_cnt + wlun_cnt;
5067 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
5068 scsi_set_resid(scp, scsi_bufflen(scp));
5069 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5070 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5071
5072 /* loops rely on sizeof response header same as sizeof lun (both 8) */
5073 lun = sdebug_no_lun_0 ? 1 : 0;
5074 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5075 memset(arr, 0, sizeof(arr));
5076 lun_p = (struct scsi_lun *)&arr[0];
5077 if (k == 0) {
5078 put_unaligned_be32(rlen, &arr[0]);
5079 ++lun_p;
5080 j = 1;
5081 }
5082 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5083 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5084 break;
5085 int_to_scsilun(lun++, lun_p);
5086 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5087 lun_p->scsi_lun[0] |= 0x40;
5088 }
5089 if (j < RL_BUCKET_ELEMS)
5090 break;
5091 n = j * sz_lun;
5092 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5093 if (res)
5094 return res;
5095 off_rsp += n;
5096 }
5097 if (wlun_cnt) {
5098 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5099 ++j;
5100 }
5101 if (j > 0)
5102 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
5103 return res;
5104 }
5105
5106 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5107 {
5108 bool is_bytchk3 = false;
5109 u8 bytchk;
5110 int ret, j;
5111 u32 vnum, a_num, off;
5112 const u32 lb_size = sdebug_sector_size;
5113 u64 lba;
5114 u8 *arr;
5115 u8 *cmd = scp->cmnd;
5116 struct sdeb_store_info *sip = devip2sip(devip, true);
5117
5118 bytchk = (cmd[1] >> 1) & 0x3;
5119 if (bytchk == 0) {
5120 return 0; /* always claim internal verify okay */
5121 } else if (bytchk == 2) {
5122 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5123 return check_condition_result;
5124 } else if (bytchk == 3) {
5125 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
5126 }
5127 switch (cmd[0]) {
5128 case VERIFY_16:
5129 lba = get_unaligned_be64(cmd + 2);
5130 vnum = get_unaligned_be32(cmd + 10);
5131 break;
5132 case VERIFY: /* is VERIFY(10) */
5133 lba = get_unaligned_be32(cmd + 2);
5134 vnum = get_unaligned_be16(cmd + 7);
5135 break;
5136 default:
5137 mk_sense_invalid_opcode(scp);
5138 return check_condition_result;
5139 }
5140 if (vnum == 0)
5141 return 0; /* not an error */
5142 a_num = is_bytchk3 ? 1 : vnum;
5143 /* Treat following check like one for read (i.e. no write) access */
5144 ret = check_device_access_params(scp, lba, a_num, false);
5145 if (ret)
5146 return ret;
5147
5148 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5149 if (!arr) {
5150 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5151 INSUFF_RES_ASCQ);
5152 return check_condition_result;
5153 }
5154 /* Not changing store, so only need read access */
5155 sdeb_data_read_lock(sip);
5156
5157 ret = do_dout_fetch(scp, a_num, arr);
5158 if (ret == -1) {
5159 ret = DID_ERROR << 16;
5160 goto cleanup;
5161 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5162 sdev_printk(KERN_INFO, scp->device,
5163 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5164 my_name, __func__, a_num * lb_size, ret);
5165 }
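	/*
	 * BYTCHK==3: a single block was received; replicate it so the whole
	 * verify range can be compared in one pass below.
	 */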
5166 if (is_bytchk3) {
5167 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5168 memcpy(arr + off, arr, lb_size);
5169 }
5170 ret = 0;
5171 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5172 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5173 ret = check_condition_result;
5174 goto cleanup;
5175 }
5176 cleanup:
5177 sdeb_data_read_unlock(sip);
5178 kfree(arr);
5179 return ret;
5180 }
5181
5182 #define RZONES_DESC_HD 64
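/*
 * The REPORT ZONES header and each zone descriptor are both 64 bytes,
 * hence the single RZONES_DESC_HD constant used for sizing below.
 */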
5183
5184 /* Report zones depending on start LBA and reporting options */
5185 static int resp_report_zones(struct scsi_cmnd *scp,
5186 struct sdebug_dev_info *devip)
5187 {
5188 unsigned int rep_max_zones, nrz = 0;
5189 int ret = 0;
5190 u32 alloc_len, rep_opts, rep_len;
5191 bool partial;
5192 u64 lba, zs_lba;
5193 u8 *arr = NULL, *desc;
5194 u8 *cmd = scp->cmnd;
5195 struct sdeb_zone_state *zsp = NULL;
5196 struct sdeb_store_info *sip = devip2sip(devip, false);
5197
5198 if (!sdebug_dev_is_zoned(devip)) {
5199 mk_sense_invalid_opcode(scp);
5200 return check_condition_result;
5201 }
5202 zs_lba = get_unaligned_be64(cmd + 2);
5203 alloc_len = get_unaligned_be32(cmd + 10);
5204 if (alloc_len == 0)
5205 return 0; /* not an error */
5206 rep_opts = cmd[14] & 0x3f;
5207 partial = cmd[14] & 0x80;
5208
5209 if (zs_lba >= sdebug_capacity) {
5210 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5211 return check_condition_result;
5212 }
5213
5214 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
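	/* i.e. how many 64-byte descriptors fit after the 64-byte header */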
5215
5216 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5217 if (!arr) {
5218 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5219 INSUFF_RES_ASCQ);
5220 return check_condition_result;
5221 }
5222
5223 sdeb_meta_read_lock(sip);
5224
5225 desc = arr + 64;
5226 for (lba = zs_lba; lba < sdebug_capacity;
5227 lba = zsp->z_start + zsp->z_size) {
5228 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5229 break;
5230 zsp = zbc_zone(devip, lba);
5231 switch (rep_opts) {
5232 case 0x00:
5233 /* All zones */
5234 break;
5235 case 0x01:
5236 /* Empty zones */
5237 if (zsp->z_cond != ZC1_EMPTY)
5238 continue;
5239 break;
5240 case 0x02:
5241 /* Implicit open zones */
5242 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5243 continue;
5244 break;
5245 case 0x03:
5246 /* Explicit open zones */
5247 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5248 continue;
5249 break;
5250 case 0x04:
5251 /* Closed zones */
5252 if (zsp->z_cond != ZC4_CLOSED)
5253 continue;
5254 break;
5255 case 0x05:
5256 /* Full zones */
5257 if (zsp->z_cond != ZC5_FULL)
5258 continue;
5259 break;
5260 case 0x06:
5261 case 0x07:
5262 case 0x10:
5263 /*
5264 * Read-only, offline, reset WP recommended are
5265 * not emulated: no zones to report;
5266 */
5267 continue;
5268 case 0x11:
5269 /* non-seq-resource set */
5270 if (!zsp->z_non_seq_resource)
5271 continue;
5272 break;
5273 case 0x3e:
5274 /* All zones except gap zones. */
5275 if (zbc_zone_is_gap(zsp))
5276 continue;
5277 break;
5278 case 0x3f:
5279 /* Not write pointer (conventional) zones */
5280 if (zbc_zone_is_seq(zsp))
5281 continue;
5282 break;
5283 default:
5284 mk_sense_buffer(scp, ILLEGAL_REQUEST,
5285 INVALID_FIELD_IN_CDB, 0);
5286 ret = check_condition_result;
5287 goto fini;
5288 }
5289
5290 if (nrz < rep_max_zones) {
5291 /* Fill zone descriptor */
5292 desc[0] = zsp->z_type;
5293 desc[1] = zsp->z_cond << 4;
5294 if (zsp->z_non_seq_resource)
5295 desc[1] |= 1 << 1;
5296 put_unaligned_be64((u64)zsp->z_size, desc + 8);
5297 put_unaligned_be64((u64)zsp->z_start, desc + 16);
5298 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5299 desc += 64;
5300 }
5301
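		/*
		 * Without the PARTIAL bit, nrz keeps counting matching zones
		 * beyond those that fit, so the header below reports the full
		 * zone list length.
		 */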
5302 if (partial && nrz >= rep_max_zones)
5303 break;
5304
5305 nrz++;
5306 }
5307
5308 /* Report header */
5309 /* Zone list length. */
5310 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5311 /* Maximum LBA */
5312 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5313 /* Zone starting LBA granularity. */
5314 if (devip->zcap < devip->zsize)
5315 put_unaligned_be64(devip->zsize, arr + 16);
5316
5317 rep_len = (unsigned long)desc - (unsigned long)arr;
5318 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5319
5320 fini:
5321 sdeb_meta_read_unlock(sip);
5322 kfree(arr);
5323 return ret;
5324 }
5325
5326 static int resp_atomic_write(struct scsi_cmnd *scp,
5327 struct sdebug_dev_info *devip)
5328 {
5329 struct sdeb_store_info *sip;
5330 u8 *cmd = scp->cmnd;
5331 u16 boundary, len;
5332 u64 lba, lba_tmp;
5333 int ret;
5334
5335 if (!scsi_debug_atomic_write()) {
5336 mk_sense_invalid_opcode(scp);
5337 return check_condition_result;
5338 }
5339
5340 sip = devip2sip(devip, true);
5341
5342 lba = get_unaligned_be64(cmd + 2);
5343 boundary = get_unaligned_be16(cmd + 10);
5344 len = get_unaligned_be16(cmd + 12);
5345
5346 lba_tmp = lba;
5347 if (sdebug_atomic_wr_align &&
5348 do_div(lba_tmp, sdebug_atomic_wr_align)) {
5349 /* Does not meet alignment requirement */
5350 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5351 return check_condition_result;
5352 }
5353
5354 if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
5355 /* Does not meet alignment requirement */
5356 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5357 return check_condition_result;
5358 }
5359
5360 if (boundary > 0) {
5361 if (boundary > sdebug_atomic_wr_max_bndry) {
5362 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5363 return check_condition_result;
5364 }
5365
5366 if (len > sdebug_atomic_wr_max_length_bndry) {
5367 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5368 return check_condition_result;
5369 }
5370 } else {
5371 if (len > sdebug_atomic_wr_max_length) {
5372 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
5373 return check_condition_result;
5374 }
5375 }
5376
5377 ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
5378 if (unlikely(ret == -1))
5379 return DID_ERROR << 16;
5380 if (unlikely(ret != len * sdebug_sector_size))
5381 return DID_ERROR << 16;
5382 return 0;
5383 }
5384
5385 /* Logic transplanted from tcmu-runner, file_zbc.c */
5386 static void zbc_open_all(struct sdebug_dev_info *devip)
5387 {
5388 struct sdeb_zone_state *zsp = &devip->zstate[0];
5389 unsigned int i;
5390
5391 for (i = 0; i < devip->nr_zones; i++, zsp++) {
5392 if (zsp->z_cond == ZC4_CLOSED)
5393 zbc_open_zone(devip, &devip->zstate[i], true);
5394 }
5395 }
5396
5397 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5398 {
5399 int res = 0;
5400 u64 z_id;
5401 enum sdebug_z_cond zc;
5402 u8 *cmd = scp->cmnd;
5403 struct sdeb_zone_state *zsp;
5404 bool all = cmd[14] & 0x01;
5405 struct sdeb_store_info *sip = devip2sip(devip, false);
5406
5407 if (!sdebug_dev_is_zoned(devip)) {
5408 mk_sense_invalid_opcode(scp);
5409 return check_condition_result;
5410 }
5411 sdeb_meta_write_lock(sip);
5412
5413 if (all) {
5414 /* Check if all closed zones can be opened */
5415 if (devip->max_open &&
5416 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5417 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5418 INSUFF_ZONE_ASCQ);
5419 res = check_condition_result;
5420 goto fini;
5421 }
5422 /* Open all closed zones */
5423 zbc_open_all(devip);
5424 goto fini;
5425 }
5426
5427 /* Open the specified zone */
5428 z_id = get_unaligned_be64(cmd + 2);
5429 if (z_id >= sdebug_capacity) {
5430 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5431 res = check_condition_result;
5432 goto fini;
5433 }
5434
5435 zsp = zbc_zone(devip, z_id);
5436 if (z_id != zsp->z_start) {
5437 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5438 res = check_condition_result;
5439 goto fini;
5440 }
5441 if (zbc_zone_is_conv(zsp)) {
5442 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5443 res = check_condition_result;
5444 goto fini;
5445 }
5446
5447 zc = zsp->z_cond;
5448 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5449 goto fini;
5450
5451 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5452 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5453 INSUFF_ZONE_ASCQ);
5454 res = check_condition_result;
5455 goto fini;
5456 }
5457
5458 zbc_open_zone(devip, zsp, true);
5459 fini:
5460 sdeb_meta_write_unlock(sip);
5461 return res;
5462 }
5463
5464 static void zbc_close_all(struct sdebug_dev_info *devip)
5465 {
5466 unsigned int i;
5467
5468 for (i = 0; i < devip->nr_zones; i++)
5469 zbc_close_zone(devip, &devip->zstate[i]);
5470 }
5471
5472 static int resp_close_zone(struct scsi_cmnd *scp,
5473 struct sdebug_dev_info *devip)
5474 {
5475 int res = 0;
5476 u64 z_id;
5477 u8 *cmd = scp->cmnd;
5478 struct sdeb_zone_state *zsp;
5479 bool all = cmd[14] & 0x01;
5480 struct sdeb_store_info *sip = devip2sip(devip, false);
5481
5482 if (!sdebug_dev_is_zoned(devip)) {
5483 mk_sense_invalid_opcode(scp);
5484 return check_condition_result;
5485 }
5486
5487 sdeb_meta_write_lock(sip);
5488
5489 if (all) {
5490 zbc_close_all(devip);
5491 goto fini;
5492 }
5493
5494 /* Close specified zone */
5495 z_id = get_unaligned_be64(cmd + 2);
5496 if (z_id >= sdebug_capacity) {
5497 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5498 res = check_condition_result;
5499 goto fini;
5500 }
5501
5502 zsp = zbc_zone(devip, z_id);
5503 if (z_id != zsp->z_start) {
5504 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5505 res = check_condition_result;
5506 goto fini;
5507 }
5508 if (zbc_zone_is_conv(zsp)) {
5509 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5510 res = check_condition_result;
5511 goto fini;
5512 }
5513
5514 zbc_close_zone(devip, zsp);
5515 fini:
5516 sdeb_meta_write_unlock(sip);
5517 return res;
5518 }
5519
5520 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5521 struct sdeb_zone_state *zsp, bool empty)
5522 {
5523 enum sdebug_z_cond zc = zsp->z_cond;
5524
5525 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5526 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5527 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5528 zbc_close_zone(devip, zsp);
5529 if (zsp->z_cond == ZC4_CLOSED)
5530 devip->nr_closed--;
5531 zsp->z_wp = zsp->z_start + zsp->z_size;
5532 zsp->z_cond = ZC5_FULL;
5533 }
5534 }
5535
5536 static void zbc_finish_all(struct sdebug_dev_info *devip)
5537 {
5538 unsigned int i;
5539
5540 for (i = 0; i < devip->nr_zones; i++)
5541 zbc_finish_zone(devip, &devip->zstate[i], false);
5542 }
5543
5544 static int resp_finish_zone(struct scsi_cmnd *scp,
5545 struct sdebug_dev_info *devip)
5546 {
5547 struct sdeb_zone_state *zsp;
5548 int res = 0;
5549 u64 z_id;
5550 u8 *cmd = scp->cmnd;
5551 bool all = cmd[14] & 0x01;
5552 struct sdeb_store_info *sip = devip2sip(devip, false);
5553
5554 if (!sdebug_dev_is_zoned(devip)) {
5555 mk_sense_invalid_opcode(scp);
5556 return check_condition_result;
5557 }
5558
5559 sdeb_meta_write_lock(sip);
5560
5561 if (all) {
5562 zbc_finish_all(devip);
5563 goto fini;
5564 }
5565
5566 /* Finish the specified zone */
5567 z_id = get_unaligned_be64(cmd + 2);
5568 if (z_id >= sdebug_capacity) {
5569 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5570 res = check_condition_result;
5571 goto fini;
5572 }
5573
5574 zsp = zbc_zone(devip, z_id);
5575 if (z_id != zsp->z_start) {
5576 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5577 res = check_condition_result;
5578 goto fini;
5579 }
5580 if (zbc_zone_is_conv(zsp)) {
5581 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5582 res = check_condition_result;
5583 goto fini;
5584 }
5585
5586 zbc_finish_zone(devip, zsp, true);
5587 fini:
5588 sdeb_meta_write_unlock(sip);
5589 return res;
5590 }
5591
5592 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5593 struct sdeb_zone_state *zsp)
5594 {
5595 enum sdebug_z_cond zc;
5596 struct sdeb_store_info *sip = devip2sip(devip, false);
5597
5598 if (!zbc_zone_is_seq(zsp))
5599 return;
5600
5601 zc = zsp->z_cond;
5602 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5603 zbc_close_zone(devip, zsp);
5604
5605 if (zsp->z_cond == ZC4_CLOSED)
5606 devip->nr_closed--;
5607
5608 if (zsp->z_wp > zsp->z_start)
5609 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5610 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5611
5612 zsp->z_non_seq_resource = false;
5613 zsp->z_wp = zsp->z_start;
5614 zsp->z_cond = ZC1_EMPTY;
5615 }
5616
5617 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5618 {
5619 unsigned int i;
5620
5621 for (i = 0; i < devip->nr_zones; i++)
5622 zbc_rwp_zone(devip, &devip->zstate[i]);
5623 }
5624
5625 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5626 {
5627 struct sdeb_zone_state *zsp;
5628 int res = 0;
5629 u64 z_id;
5630 u8 *cmd = scp->cmnd;
5631 bool all = cmd[14] & 0x01;
5632 struct sdeb_store_info *sip = devip2sip(devip, false);
5633
5634 if (!sdebug_dev_is_zoned(devip)) {
5635 mk_sense_invalid_opcode(scp);
5636 return check_condition_result;
5637 }
5638
5639 sdeb_meta_write_lock(sip);
5640
5641 if (all) {
5642 zbc_rwp_all(devip);
5643 goto fini;
5644 }
5645
5646 z_id = get_unaligned_be64(cmd + 2);
5647 if (z_id >= sdebug_capacity) {
5648 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5649 res = check_condition_result;
5650 goto fini;
5651 }
5652
5653 zsp = zbc_zone(devip, z_id);
5654 if (z_id != zsp->z_start) {
5655 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5656 res = check_condition_result;
5657 goto fini;
5658 }
5659 if (zbc_zone_is_conv(zsp)) {
5660 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5661 res = check_condition_result;
5662 goto fini;
5663 }
5664
5665 zbc_rwp_zone(devip, zsp);
5666 fini:
5667 sdeb_meta_write_unlock(sip);
5668 return res;
5669 }
5670
5671 static u32 get_tag(struct scsi_cmnd *cmnd)
5672 {
5673 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5674 }
5675
5676 /* Queued (deferred) command completions converge here. */
5677 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5678 {
5679 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5680 unsigned long flags;
5681 struct scsi_cmnd *scp = sqcp->scmd;
5682 struct sdebug_scsi_cmd *sdsc;
5683 bool aborted;
5684
5685 if (sdebug_statistics) {
5686 atomic_inc(&sdebug_completions);
5687 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5688 atomic_inc(&sdebug_miss_cpus);
5689 }
5690
5691 if (!scp) {
5692 pr_err("scmd=NULL\n");
5693 goto out;
5694 }
5695
5696 sdsc = scsi_cmd_priv(scp);
5697 spin_lock_irqsave(&sdsc->lock, flags);
5698 aborted = sd_dp->aborted;
5699 if (unlikely(aborted))
5700 sd_dp->aborted = false;
5701 ASSIGN_QUEUED_CMD(scp, NULL);
5702
5703 spin_unlock_irqrestore(&sdsc->lock, flags);
5704
5705 if (aborted) {
5706 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5707 blk_abort_request(scsi_cmd_to_rq(scp));
5708 goto out;
5709 }
5710
5711 scsi_done(scp); /* callback to mid level */
5712 out:
5713 sdebug_free_queued_cmd(sqcp);
5714 }
5715
5716 /* When high resolution timer goes off this function is called. */
5717 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5718 {
5719 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5720 hrt);
5721 sdebug_q_cmd_complete(sd_dp);
5722 return HRTIMER_NORESTART;
5723 }
5724
5725 /* When work queue schedules work, it calls this function. */
5726 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5727 {
5728 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5729 ew.work);
5730 sdebug_q_cmd_complete(sd_dp);
5731 }
5732
5733 static bool got_shared_uuid;
5734 static uuid_t shared_uuid;
5735
5736 static bool sdebug_is_zone_start(struct sdebug_dev_info *devip, u64 zstart)
5737 {
5738 u32 remainder;
5739
5740 div_u64_rem(zstart, devip->zsize, &remainder);
5741 return remainder == 0;
5742 }
5743
5744 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5745 {
5746 struct sdeb_zone_state *zsp;
5747 sector_t capacity = get_sdebug_capacity();
5748 sector_t conv_capacity;
5749 sector_t zstart = 0;
5750 unsigned int i;
5751
5752 /*
5753 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5754 * a zone size allowing for at least 4 zones on the device. Otherwise,
5755 * use the specified zone size checking that at least 2 zones can be
5756 * created for the device.
5757 */
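	/*
	 * Example (assuming DEF_ZBC_ZONE_SIZE_MB has its usual default of
	 * 128): with 512-byte sectors the starting zone size is 262144
	 * sectors, halved until at least 4 zones fit the device capacity.
	 */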
5758 if (!sdeb_zbc_zone_size_mb) {
5759 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5760 >> ilog2(sdebug_sector_size);
5761 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5762 devip->zsize >>= 1;
5763 if (devip->zsize < 2) {
5764 pr_err("Device capacity too small\n");
5765 return -EINVAL;
5766 }
5767 } else {
5768 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5769 >> ilog2(sdebug_sector_size);
5770 if (devip->zsize >= capacity) {
5771 pr_err("Zone size too large for device capacity\n");
5772 return -EINVAL;
5773 }
5774 }
5775
5776 devip->nr_zones = div_u64(capacity + devip->zsize - 1, devip->zsize);
5777
5778 if (sdeb_zbc_zone_cap_mb == 0) {
5779 devip->zcap = devip->zsize;
5780 } else {
5781 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5782 ilog2(sdebug_sector_size);
5783 if (devip->zcap > devip->zsize) {
5784 pr_err("Zone capacity too large\n");
5785 return -EINVAL;
5786 }
5787 }
5788
5789 conv_capacity = (sector_t)sdeb_zbc_nr_conv * devip->zsize;
5790 if (conv_capacity >= capacity) {
5791 pr_err("Number of conventional zones too large\n");
5792 return -EINVAL;
5793 }
5794 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5795 devip->nr_seq_zones = div_u64(capacity - conv_capacity +
5796 devip->zsize - 1, devip->zsize);
5797 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5798
5799 /* Add gap zones if zone capacity is smaller than the zone size */
5800 if (devip->zcap < devip->zsize)
5801 devip->nr_zones += devip->nr_seq_zones;
5802
5803 if (devip->zoned) {
5804 /* zbc_max_open_zones can be 0, meaning "not reported" */
5805 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5806 devip->max_open = (devip->nr_zones - 1) / 2;
5807 else
5808 devip->max_open = sdeb_zbc_max_open;
5809 }
5810
5811 devip->zstate = kcalloc(devip->nr_zones,
5812 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5813 if (!devip->zstate)
5814 return -ENOMEM;
5815
5816 for (i = 0; i < devip->nr_zones; i++) {
5817 zsp = &devip->zstate[i];
5818
5819 zsp->z_start = zstart;
5820
5821 if (i < devip->nr_conv_zones) {
5822 zsp->z_type = ZBC_ZTYPE_CNV;
5823 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5824 zsp->z_wp = (sector_t)-1;
5825 zsp->z_size =
5826 min_t(u64, devip->zsize, capacity - zstart);
5827 } else if (sdebug_is_zone_start(devip, zstart)) {
5828 if (devip->zoned)
5829 zsp->z_type = ZBC_ZTYPE_SWR;
5830 else
5831 zsp->z_type = ZBC_ZTYPE_SWP;
5832 zsp->z_cond = ZC1_EMPTY;
5833 zsp->z_wp = zsp->z_start;
5834 zsp->z_size =
5835 min_t(u64, devip->zcap, capacity - zstart);
5836 } else {
5837 zsp->z_type = ZBC_ZTYPE_GAP;
5838 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5839 zsp->z_wp = (sector_t)-1;
5840 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5841 capacity - zstart);
5842 }
5843
5844 WARN_ON_ONCE((int)zsp->z_size <= 0);
5845 zstart += zsp->z_size;
5846 }
5847
5848 return 0;
5849 }
5850
5851 static struct sdebug_dev_info *sdebug_device_create(
5852 struct sdebug_host_info *sdbg_host, gfp_t flags)
5853 {
5854 struct sdebug_dev_info *devip;
5855
5856 devip = kzalloc(sizeof(*devip), flags);
5857 if (devip) {
5858 if (sdebug_uuid_ctl == 1)
5859 uuid_gen(&devip->lu_name);
5860 else if (sdebug_uuid_ctl == 2) {
5861 if (got_shared_uuid)
5862 devip->lu_name = shared_uuid;
5863 else {
5864 uuid_gen(&shared_uuid);
5865 got_shared_uuid = true;
5866 devip->lu_name = shared_uuid;
5867 }
5868 }
5869 devip->sdbg_host = sdbg_host;
5870 if (sdeb_zbc_in_use) {
5871 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5872 if (sdebug_device_create_zones(devip)) {
5873 kfree(devip);
5874 return NULL;
5875 }
5876 } else {
5877 devip->zoned = false;
5878 }
5879 if (sdebug_ptype == TYPE_TAPE) {
5880 devip->tape_density = TAPE_DEF_DENSITY;
5881 devip->tape_blksize = TAPE_DEF_BLKSIZE;
5882 }
5883 devip->create_ts = ktime_get_boottime();
5884 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5885 spin_lock_init(&devip->list_lock);
5886 INIT_LIST_HEAD(&devip->inject_err_list);
5887 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5888 }
5889 return devip;
5890 }
5891
5892 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5893 {
5894 struct sdebug_host_info *sdbg_host;
5895 struct sdebug_dev_info *open_devip = NULL;
5896 struct sdebug_dev_info *devip;
5897
5898 sdbg_host = shost_to_sdebug_host(sdev->host);
5899
5900 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5901 if ((devip->used) && (devip->channel == sdev->channel) &&
5902 (devip->target == sdev->id) &&
5903 (devip->lun == sdev->lun))
5904 return devip;
5905 else {
5906 if ((!devip->used) && (!open_devip))
5907 open_devip = devip;
5908 }
5909 }
5910 if (!open_devip) { /* try and make a new one */
5911 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5912 if (!open_devip) {
5913 pr_err("out of memory at line %d\n", __LINE__);
5914 return NULL;
5915 }
5916 }
5917
5918 open_devip->channel = sdev->channel;
5919 open_devip->target = sdev->id;
5920 open_devip->lun = sdev->lun;
5921 open_devip->sdbg_host = sdbg_host;
5922 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5923 open_devip->used = true;
5924 return open_devip;
5925 }
5926
5927 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5928 {
5929 if (sdebug_verbose)
5930 pr_info("slave_alloc <%u %u %u %llu>\n",
5931 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5932
5933 return 0;
5934 }
5935
5936 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5937 {
5938 struct sdebug_dev_info *devip =
5939 (struct sdebug_dev_info *)sdp->hostdata;
5940 struct dentry *dentry;
5941
5942 if (sdebug_verbose)
5943 pr_info("slave_configure <%u %u %u %llu>\n",
5944 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5945 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5946 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5947 if (devip == NULL) {
5948 devip = find_build_dev_info(sdp);
5949 if (devip == NULL)
5950 return 1; /* no resources, will be marked offline */
5951 }
5952 sdp->hostdata = devip;
5953 if (sdebug_no_uld)
5954 sdp->no_uld_attach = 1;
5955 config_cdb_len(sdp);
5956
5957 if (sdebug_allow_restart)
5958 sdp->allow_restart = 1;
5959
5960 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5961 sdebug_debugfs_root);
5962 if (IS_ERR_OR_NULL(devip->debugfs_entry))
5963 pr_info("%s: failed to create debugfs directory for device %s\n",
5964 __func__, dev_name(&sdp->sdev_gendev));
5965
5966 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5967 &sdebug_error_fops);
5968 if (IS_ERR_OR_NULL(dentry))
5969 pr_info("%s: failed to create error file for device %s\n",
5970 __func__, dev_name(&sdp->sdev_gendev));
5971
5972 return 0;
5973 }
5974
5975 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5976 {
5977 struct sdebug_dev_info *devip =
5978 (struct sdebug_dev_info *)sdp->hostdata;
5979 struct sdebug_err_inject *err;
5980
5981 if (sdebug_verbose)
5982 pr_info("slave_destroy <%u %u %u %llu>\n",
5983 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5984
5985 if (!devip)
5986 return;
5987
5988 spin_lock(&devip->list_lock);
5989 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5990 list_del_rcu(&err->list);
5991 call_rcu(&err->rcu, sdebug_err_free);
5992 }
5993 spin_unlock(&devip->list_lock);
5994
5995 debugfs_remove(devip->debugfs_entry);
5996
5997 /* make this slot available for re-use */
5998 devip->used = false;
5999 sdp->hostdata = NULL;
6000 }
6001
6002 /* Returns true if we require the queued memory to be freed by the caller. */
6003 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
6004 enum sdeb_defer_type defer_t)
6005 {
6006 if (defer_t == SDEB_DEFER_HRT) {
6007 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6008
6009 switch (res) {
6010 case 0: /* Not active, it must have already run */
6011 case -1: /* It's executing the callback */
6012 return false;
6013 case 1: /* Was active, we've now cancelled */
6014 default:
6015 return true;
6016 }
6017 } else if (defer_t == SDEB_DEFER_WQ) {
6018 /* Cancel if pending */
6019 if (cancel_work_sync(&sd_dp->ew.work))
6020 return true;
6021 /* Was not pending, so it must have run */
6022 return false;
6023 } else if (defer_t == SDEB_DEFER_POLL) {
6024 return true;
6025 }
6026
6027 return false;
6028 }
6029
6030
6031 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6032 {
6033 enum sdeb_defer_type l_defer_t;
6034 struct sdebug_defer *sd_dp;
6035 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6036 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
6037
6038 lockdep_assert_held(&sdsc->lock);
6039
6040 if (!sqcp)
6041 return false;
6042 sd_dp = &sqcp->sd_dp;
6043 l_defer_t = READ_ONCE(sd_dp->defer_t);
6044 ASSIGN_QUEUED_CMD(cmnd, NULL);
6045
6046 if (stop_qc_helper(sd_dp, l_defer_t))
6047 sdebug_free_queued_cmd(sqcp);
6048
6049 return true;
6050 }
6051
6052 /*
6053 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6054 */
6055 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6056 {
6057 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6058 unsigned long flags;
6059 bool res;
6060
6061 spin_lock_irqsave(&sdsc->lock, flags);
6062 res = scsi_debug_stop_cmnd(cmnd);
6063 spin_unlock_irqrestore(&sdsc->lock, flags);
6064
6065 return res;
6066 }
6067
6068 /*
6069 * All we can do is set the cmnd as internally aborted and wait for it to
6070 * finish. We cannot call scsi_done() as the normal completion path may do that.
6071 */
6072 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6073 {
6074 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6075
6076 return true;
6077 }
6078
6079 /* Deletes (stops) timers or work queues of all queued commands */
6080 static void stop_all_queued(void)
6081 {
6082 struct sdebug_host_info *sdhp;
6083
6084 mutex_lock(&sdebug_host_list_mutex);
6085 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6086 struct Scsi_Host *shost = sdhp->shost;
6087
6088 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6089 }
6090 mutex_unlock(&sdebug_host_list_mutex);
6091 }
6092
6093 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6094 {
6095 struct scsi_device *sdp = cmnd->device;
6096 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6097 struct sdebug_err_inject *err;
6098 unsigned char *cmd = cmnd->cmnd;
6099 int ret = 0;
6100
6101 if (devip == NULL)
6102 return 0;
6103
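	/*
	 * err->cnt semantics as used here: 0 means the injection is exhausted
	 * (do not fail), a negative count injects a failure and counts up
	 * toward zero, and a positive count keeps failing indefinitely.
	 */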
6104 rcu_read_lock();
6105 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6106 if (err->type == ERR_ABORT_CMD_FAILED &&
6107 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6108 ret = !!err->cnt;
6109 if (err->cnt < 0)
6110 err->cnt++;
6111
6112 rcu_read_unlock();
6113 return ret;
6114 }
6115 }
6116 rcu_read_unlock();
6117
6118 return 0;
6119 }
6120
6121 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6122 {
6123 bool ok = scsi_debug_abort_cmnd(SCpnt);
6124 u8 *cmd = SCpnt->cmnd;
6125 u8 opcode = cmd[0];
6126
6127 ++num_aborts;
6128
6129 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6130 sdev_printk(KERN_INFO, SCpnt->device,
6131 "%s: command%s found\n", __func__,
6132 ok ? "" : " not");
6133
6134 if (sdebug_fail_abort(SCpnt)) {
6135 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6136 opcode);
6137 return FAILED;
6138 }
6139
6140 return SUCCESS;
6141 }
6142
6143 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6144 {
6145 struct scsi_device *sdp = data;
6146 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6147
6148 if (scmd->device == sdp)
6149 scsi_debug_abort_cmnd(scmd);
6150
6151 return true;
6152 }
6153
6154 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6155 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6156 {
6157 struct Scsi_Host *shost = sdp->host;
6158
6159 blk_mq_tagset_busy_iter(&shost->tag_set,
6160 scsi_debug_stop_all_queued_iter, sdp);
6161 }
6162
6163 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6164 {
6165 struct scsi_device *sdp = cmnd->device;
6166 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6167 struct sdebug_err_inject *err;
6168 unsigned char *cmd = cmnd->cmnd;
6169 int ret = 0;
6170
6171 if (devip == NULL)
6172 return 0;
6173
6174 rcu_read_lock();
6175 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6176 if (err->type == ERR_LUN_RESET_FAILED &&
6177 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6178 ret = !!err->cnt;
6179 if (err->cnt < 0)
6180 err->cnt++;
6181
6182 rcu_read_unlock();
6183 return ret;
6184 }
6185 }
6186 rcu_read_unlock();
6187
6188 return 0;
6189 }
6190
6191 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6192 {
6193 struct scsi_device *sdp = SCpnt->device;
6194 struct sdebug_dev_info *devip = sdp->hostdata;
6195 u8 *cmd = SCpnt->cmnd;
6196 u8 opcode = cmd[0];
6197
6198 ++num_dev_resets;
6199
6200 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6201 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6202
6203 scsi_debug_stop_all_queued(sdp);
6204 if (devip)
6205 set_bit(SDEBUG_UA_POR, devip->uas_bm);
6206
6207 if (sdebug_fail_lun_reset(SCpnt)) {
6208 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6209 return FAILED;
6210 }
6211
6212 return SUCCESS;
6213 }
6214
6215 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6216 {
6217 struct scsi_target *starget = scsi_target(cmnd->device);
6218 struct sdebug_target_info *targetip =
6219 (struct sdebug_target_info *)starget->hostdata;
6220
6221 if (targetip)
6222 return targetip->reset_fail;
6223
6224 return 0;
6225 }
6226
6227 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6228 {
6229 struct scsi_device *sdp = SCpnt->device;
6230 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6231 struct sdebug_dev_info *devip;
6232 u8 *cmd = SCpnt->cmnd;
6233 u8 opcode = cmd[0];
6234 int k = 0;
6235
6236 ++num_target_resets;
6237 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6238 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6239
6240 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6241 if (devip->target == sdp->id) {
6242 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6243 ++k;
6244 }
6245 }
6246
6247 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6248 sdev_printk(KERN_INFO, sdp,
6249 "%s: %d device(s) found in target\n", __func__, k);
6250
6251 if (sdebug_fail_target_reset(SCpnt)) {
6252 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6253 opcode);
6254 return FAILED;
6255 }
6256
6257 return SUCCESS;
6258 }
6259
6260 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6261 {
6262 struct scsi_device *sdp = SCpnt->device;
6263 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6264 struct sdebug_dev_info *devip;
6265 int k = 0;
6266
6267 ++num_bus_resets;
6268
6269 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6270 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6271
6272 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6273 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6274 ++k;
6275 }
6276
6277 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6278 sdev_printk(KERN_INFO, sdp,
6279 "%s: %d device(s) found in host\n", __func__, k);
6280 return SUCCESS;
6281 }
6282
6283 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6284 {
6285 struct sdebug_host_info *sdbg_host;
6286 struct sdebug_dev_info *devip;
6287 int k = 0;
6288
6289 ++num_host_resets;
6290 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6291 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
6292 mutex_lock(&sdebug_host_list_mutex);
6293 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
6294 list_for_each_entry(devip, &sdbg_host->dev_info_list,
6295 dev_list) {
6296 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6297 ++k;
6298 }
6299 }
6300 mutex_unlock(&sdebug_host_list_mutex);
6301 stop_all_queued();
6302 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6303 sdev_printk(KERN_INFO, SCpnt->device,
6304 "%s: %d device(s) found\n", __func__, k);
6305 return SUCCESS;
6306 }
6307
6308 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
6309 {
6310 struct msdos_partition *pp;
6311 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
6312 int sectors_per_part, num_sectors, k;
6313 int heads_by_sects, start_sec, end_sec;
6314
6315 /* assume partition table already zeroed */
6316 if ((sdebug_num_parts < 1) || (store_size < 1048576))
6317 return;
6318 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
6319 sdebug_num_parts = SDEBUG_MAX_PARTS;
6320 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
6321 }
6322 num_sectors = (int)get_sdebug_capacity();
6323 sectors_per_part = (num_sectors - sdebug_sectors_per)
6324 / sdebug_num_parts;
6325 heads_by_sects = sdebug_heads * sdebug_sectors_per;
6326 starts[0] = sdebug_sectors_per;
6327 max_part_secs = sectors_per_part;
6328 for (k = 1; k < sdebug_num_parts; ++k) {
6329 starts[k] = ((k * sectors_per_part) / heads_by_sects)
6330 * heads_by_sects;
6331 if (starts[k] - starts[k - 1] < max_part_secs)
6332 max_part_secs = starts[k] - starts[k - 1];
6333 }
6334 starts[sdebug_num_parts] = num_sectors;
6335 starts[sdebug_num_parts + 1] = 0;
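	/*
	 * starts[] now holds the partition start sectors (cylinder-aligned
	 * after the first); the trailing zero entry terminates the build
	 * loop below.
	 */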
6336
6337 ramp[510] = 0x55; /* magic partition markings */
6338 ramp[511] = 0xAA;
6339 pp = (struct msdos_partition *)(ramp + 0x1be);
6340 for (k = 0; starts[k + 1]; ++k, ++pp) {
6341 start_sec = starts[k];
6342 end_sec = starts[k] + max_part_secs - 1;
6343 pp->boot_ind = 0;
6344
6345 pp->cyl = start_sec / heads_by_sects;
6346 pp->head = (start_sec - (pp->cyl * heads_by_sects))
6347 / sdebug_sectors_per;
6348 pp->sector = (start_sec % sdebug_sectors_per) + 1;
6349
6350 pp->end_cyl = end_sec / heads_by_sects;
6351 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6352 / sdebug_sectors_per;
6353 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6354
6355 pp->start_sect = cpu_to_le32(start_sec);
6356 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6357 pp->sys_ind = 0x83; /* plain Linux partition */
6358 }
6359 }
6360
6361 static void block_unblock_all_queues(bool block)
6362 {
6363 struct sdebug_host_info *sdhp;
6364
6365 lockdep_assert_held(&sdebug_host_list_mutex);
6366
6367 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6368 struct Scsi_Host *shost = sdhp->shost;
6369
6370 if (block)
6371 scsi_block_requests(shost);
6372 else
6373 scsi_unblock_requests(shost);
6374 }
6375 }
6376
6377 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6378 * commands will be processed normally before triggers occur.
6379 */
6380 static void tweak_cmnd_count(void)
6381 {
6382 int count, modulo;
6383
6384 modulo = abs(sdebug_every_nth);
6385 if (modulo < 2)
6386 return;
6387
6388 mutex_lock(&sdebug_host_list_mutex);
6389 block_unblock_all_queues(true);
6390 count = atomic_read(&sdebug_cmnd_count);
6391 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6392 block_unblock_all_queues(false);
6393 mutex_unlock(&sdebug_host_list_mutex);
6394 }
6395
6396 static void clear_queue_stats(void)
6397 {
6398 atomic_set(&sdebug_cmnd_count, 0);
6399 atomic_set(&sdebug_completions, 0);
6400 atomic_set(&sdebug_miss_cpus, 0);
6401 atomic_set(&sdebug_a_tsf, 0);
6402 }
6403
6404 static bool inject_on_this_cmd(void)
6405 {
6406 if (sdebug_every_nth == 0)
6407 return false;
6408 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6409 }
6410
6411 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
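/*
 * For requested ndelay values below this threshold, schedule_resp()
 * subtracts the time already consumed setting up the command; if that
 * elapsed time covers the whole delay, the command is completed inline.
 */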
6412
6413
6414 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6415 {
6416 if (sqcp)
6417 kmem_cache_free(queued_cmd_cache, sqcp);
6418 }
6419
6420 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6421 {
6422 struct sdebug_queued_cmd *sqcp;
6423 struct sdebug_defer *sd_dp;
6424
6425 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6426 if (!sqcp)
6427 return NULL;
6428
6429 sd_dp = &sqcp->sd_dp;
6430
6431 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6432 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6433 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6434
6435 sqcp->scmd = scmd;
6436
6437 return sqcp;
6438 }
6439
6440 /* Complete the processing of the thread that queued a SCSI command to this
6441 * driver. It either completes the command by calling scsi_done() or
6442 * schedules a hrtimer or work queue callback and then returns 0. Returns
6443 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6444 */
6445 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6446 int scsi_result,
6447 int (*pfp)(struct scsi_cmnd *,
6448 struct sdebug_dev_info *),
6449 int delta_jiff, int ndelay)
6450 {
6451 struct request *rq = scsi_cmd_to_rq(cmnd);
6452 bool polled = rq->cmd_flags & REQ_POLLED;
6453 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6454 unsigned long flags;
6455 u64 ns_from_boot = 0;
6456 struct sdebug_queued_cmd *sqcp;
6457 struct scsi_device *sdp;
6458 struct sdebug_defer *sd_dp;
6459
6460 if (unlikely(devip == NULL)) {
6461 if (scsi_result == 0)
6462 scsi_result = DID_NO_CONNECT << 16;
6463 goto respond_in_thread;
6464 }
6465 sdp = cmnd->device;
6466
6467 if (delta_jiff == 0)
6468 goto respond_in_thread;
6469
6470
6471 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6472 (scsi_result == 0))) {
6473 int num_in_q = scsi_device_busy(sdp);
6474 int qdepth = cmnd->device->queue_depth;
6475
6476 if ((num_in_q == qdepth) &&
6477 (atomic_inc_return(&sdebug_a_tsf) >=
6478 abs(sdebug_every_nth))) {
6479 atomic_set(&sdebug_a_tsf, 0);
6480 scsi_result = device_qfull_result;
6481
6482 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6483 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6484 __func__, num_in_q);
6485 }
6486 }
6487
6488 sqcp = sdebug_alloc_queued_cmd(cmnd);
6489 if (!sqcp) {
6490 pr_err("%s no alloc\n", __func__);
6491 return SCSI_MLQUEUE_HOST_BUSY;
6492 }
6493 sd_dp = &sqcp->sd_dp;
6494
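	/*
	 * Take a timestamp now: it is used both as the base for polled
	 * completion times and for short-ndelay inclusive timing below.
	 */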
6495 if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
6496 ns_from_boot = ktime_get_boottime_ns();
6497
6498 /* one of the resp_*() response functions is called here */
6499 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6500 if (cmnd->result & SDEG_RES_IMMED_MASK) {
6501 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6502 delta_jiff = ndelay = 0;
6503 }
6504 if (cmnd->result == 0 && scsi_result != 0)
6505 cmnd->result = scsi_result;
6506 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6507 if (atomic_read(&sdeb_inject_pending)) {
6508 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6509 atomic_set(&sdeb_inject_pending, 0);
6510 cmnd->result = check_condition_result;
6511 }
6512 }
6513
6514 if (unlikely(sdebug_verbose && cmnd->result))
6515 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6516 __func__, cmnd->result);
6517
6518 if (delta_jiff > 0 || ndelay > 0) {
6519 ktime_t kt;
6520
6521 if (delta_jiff > 0) {
6522 u64 ns = jiffies_to_nsecs(delta_jiff);
6523
6524 if (sdebug_random && ns < U32_MAX) {
6525 ns = get_random_u32_below((u32)ns);
6526 } else if (sdebug_random) {
6527 ns >>= 12; /* scale to 4 usec precision */
6528 if (ns < U32_MAX) /* over 4 hours max */
6529 ns = get_random_u32_below((u32)ns);
6530 ns <<= 12;
6531 }
6532 kt = ns_to_ktime(ns);
6533 } else { /* ndelay has a 4.2 second max */
6534 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6535 (u32)ndelay;
6536 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6537 u64 d = ktime_get_boottime_ns() - ns_from_boot;
6538
6539 if (kt <= d) { /* elapsed duration >= kt */
6540 /* call scsi_done() from this thread */
6541 sdebug_free_queued_cmd(sqcp);
6542 scsi_done(cmnd);
6543 return 0;
6544 }
6545 /* otherwise reduce kt by elapsed time */
6546 kt -= d;
6547 }
6548 }
6549 if (sdebug_statistics)
6550 sd_dp->issuing_cpu = raw_smp_processor_id();
6551 if (polled) {
6552 spin_lock_irqsave(&sdsc->lock, flags);
6553 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6554 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6555 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6556 spin_unlock_irqrestore(&sdsc->lock, flags);
6557 } else {
6558 /* schedule the invocation of scsi_done() for a later time */
6559 spin_lock_irqsave(&sdsc->lock, flags);
6560 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6561 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6562 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6563 /*
6564 * The completion handler will try to grab sqcp->lock,
6565 * so there is no chance that the completion handler
6566 * will call scsi_done() until we release the lock
6567 * here (so ok to keep referencing sdsc).
6568 */
6569 spin_unlock_irqrestore(&sdsc->lock, flags);
6570 }
6571 } else { /* delta_jiff < 0, use work queue */
6572 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6573 atomic_read(&sdeb_inject_pending))) {
6574 sd_dp->aborted = true;
6575 atomic_set(&sdeb_inject_pending, 0);
6576 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6577 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6578 }
6579
6580 if (sdebug_statistics)
6581 sd_dp->issuing_cpu = raw_smp_processor_id();
6582 if (polled) {
6583 spin_lock_irqsave(&sdsc->lock, flags);
6584 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6585 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6586 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6587 spin_unlock_irqrestore(&sdsc->lock, flags);
6588 } else {
6589 spin_lock_irqsave(&sdsc->lock, flags);
6590 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6591 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6592 schedule_work(&sd_dp->ew.work);
6593 spin_unlock_irqrestore(&sdsc->lock, flags);
6594 }
6595 }
6596
6597 return 0;
6598
6599 respond_in_thread: /* call back to mid-layer using invocation thread */
6600 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6601 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6602 if (cmnd->result == 0 && scsi_result != 0)
6603 cmnd->result = scsi_result;
6604 scsi_done(cmnd);
6605 return 0;
6606 }
6607
6608 /* Note: The following macros create attribute files in the
6609 /sys/module/scsi_debug/parameters directory. Unfortunately this
6610 driver is unaware of changes made via those files and cannot trigger
6611 auxiliary actions, as it can when the corresponding attribute in the
6612 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6613 */
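/*
 * Illustrative usage (parameter names as defined below, values arbitrary):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * creates 2 targets with 4 LUNs each, sharing a 256 MiB ram store.
 */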
6614 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6615 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6616 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6617 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6618 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6619 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6620 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6621 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6622 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6623 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6624 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6625 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6626 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6627 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6628 module_param_string(inq_product, sdebug_inq_product_id,
6629 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6630 module_param_string(inq_rev, sdebug_inq_product_rev,
6631 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6632 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6633 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6634 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6635 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6636 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6637 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6638 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
6639 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6640 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6641 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6642 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6643 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6644 S_IRUGO | S_IWUSR);
6645 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6646 S_IRUGO | S_IWUSR);
6647 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6648 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6649 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6650 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6651 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6652 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6653 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6654 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6655 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6656 module_param_named(per_host_store, sdebug_per_host_store, bool,
6657 S_IRUGO | S_IWUSR);
6658 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6659 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6660 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6661 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6662 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6663 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6664 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6665 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6666 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6667 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6668 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6669 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6670 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6671 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6672 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6673 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
6674 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
6675 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
6676 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
6677 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
6678 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6679 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6680 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6681 S_IRUGO | S_IWUSR);
6682 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6683 module_param_named(write_same_length, sdebug_write_same_length, int,
6684 S_IRUGO | S_IWUSR);
6685 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6686 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6687 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6688 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6689 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6690 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6691
6692 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6693 MODULE_DESCRIPTION("SCSI debug adapter driver");
6694 MODULE_LICENSE("GPL");
6695 MODULE_VERSION(SDEBUG_VERSION);
6696
6697 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6698 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6699 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6700 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6701 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6702 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6703 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6704 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6705 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6706 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6707 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6708 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6709 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6710 MODULE_PARM_DESC(host_max_queue,
6711 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6712 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6713 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6714 SDEBUG_VERSION "\")");
6715 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6716 MODULE_PARM_DESC(lbprz,
6717 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6718 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6719 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6720 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6721 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
6722 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6723 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6724 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6725 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6726 MODULE_PARM_DESC(medium_error_count, "number of consecutive sectors on which to return MEDIUM error");
6727 MODULE_PARM_DESC(medium_error_start, "starting sector number at which to return MEDIUM error");
6728 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6729 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6730 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6731 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6732 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6733 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6734 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6735 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6736 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6737 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6738 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6739 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6740 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6741 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6742 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6743 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6744 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6745 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6746 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6747 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6748 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6749 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6750 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6751 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6752 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6753 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
6754 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
6755 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
6756 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
6757 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
6758 MODULE_PARM_DESC(uuid_ctl,
6759 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6760 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6761 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6762 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6763 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6764 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6765 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6766 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6767 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6768 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6769 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6770
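/*
 * Example invocation (hypothetical values; the names are the module
 * parameters declared above): two hosts, each with its own 128 MiB ram
 * store, 4 targets and 2 LUNs per target:
 *
 *   modprobe scsi_debug add_host=2 per_host_store=1 dev_size_mb=128 \
 *            num_tgts=4 max_luns=2
 */
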
6771 #define SDEBUG_INFO_LEN 256
6772 static char sdebug_info[SDEBUG_INFO_LEN];
6773
6774 static const char *scsi_debug_info(struct Scsi_Host *shp)
6775 {
6776 int k;
6777
6778 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6779 my_name, SDEBUG_VERSION, sdebug_version_date);
6780 if (k >= (SDEBUG_INFO_LEN - 1))
6781 return sdebug_info;
6782 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6783 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6784 sdebug_dev_size_mb, sdebug_opts, submit_queues,
6785 "statistics", (int)sdebug_statistics);
6786 return sdebug_info;
6787 }
6788
6789 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6790 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6791 int length)
6792 {
6793 char arr[16];
6794 int opts;
6795 int minLen = length > 15 ? 15 : length;
6796
6797 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6798 return -EACCES;
6799 memcpy(arr, buffer, minLen);
6800 arr[minLen] = '\0';
6801 if (1 != sscanf(arr, "%d", &opts))
6802 return -EINVAL;
6803 sdebug_opts = opts;
6804 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6805 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6806 if (sdebug_every_nth != 0)
6807 tweak_cmnd_count();
6808 return length;
6809 }
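
/*
 * For example, writing "1" (the noise bit of opts, see the opts
 * description above) turns on verbose logging of subsequent commands:
 *
 *   echo 1 > /proc/scsi/scsi_debug/<host_id>
 */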
6810
6811 struct sdebug_submit_queue_data {
6812 int *first;
6813 int *last;
6814 int queue_num;
6815 };
6816
6817 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6818 {
6819 struct sdebug_submit_queue_data *data = opaque;
6820 u32 unique_tag = blk_mq_unique_tag(rq);
6821 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6822 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6823 int queue_num = data->queue_num;
6824
6825 if (hwq != queue_num)
6826 return true;
6827
6828 /* Rely on iter'ing in ascending tag order */
6829 if (*data->first == -1)
6830 *data->first = *data->last = tag;
6831 else
6832 *data->last = tag;
6833
6834 return true;
6835 }
6836
6837 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6838 * same for each scsi_debug host (if more than one). Some of the counters
6839 * output are not atomics so might be inaccurate in a busy system. */
6840 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6841 {
6842 struct sdebug_host_info *sdhp;
6843 int j;
6844
6845 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6846 SDEBUG_VERSION, sdebug_version_date);
6847 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6848 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6849 sdebug_opts, sdebug_every_nth);
6850 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6851 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6852 sdebug_sector_size, "bytes");
6853 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6854 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6855 num_aborts);
6856 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6857 num_dev_resets, num_target_resets, num_bus_resets,
6858 num_host_resets);
6859 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6860 dix_reads, dix_writes, dif_errors);
6861 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6862 sdebug_statistics);
6863 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6864 atomic_read(&sdebug_cmnd_count),
6865 atomic_read(&sdebug_completions),
6866 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6867 atomic_read(&sdebug_a_tsf),
6868 atomic_read(&sdeb_mq_poll_count));
6869
6870 seq_printf(m, "submit_queues=%d\n", submit_queues);
6871 for (j = 0; j < submit_queues; ++j) {
6872 int f = -1, l = -1;
6873 struct sdebug_submit_queue_data data = {
6874 .queue_num = j,
6875 .first = &f,
6876 .last = &l,
6877 };
6878 seq_printf(m, " queue %d:\n", j);
6879 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6880 &data);
6881 if (f >= 0) {
6882 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6883 "first,last bits", f, l);
6884 }
6885 }
6886
6887 seq_printf(m, "this host_no=%d\n", host->host_no);
6888 if (!xa_empty(per_store_ap)) {
6889 bool niu;
6890 int idx;
6891 unsigned long l_idx;
6892 struct sdeb_store_info *sip;
6893
6894 seq_puts(m, "\nhost list:\n");
6895 j = 0;
6896 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6897 idx = sdhp->si_idx;
6898 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6899 sdhp->shost->host_no, idx);
6900 ++j;
6901 }
6902 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6903 sdeb_most_recent_idx);
6904 j = 0;
6905 xa_for_each(per_store_ap, l_idx, sip) {
6906 niu = xa_get_mark(per_store_ap, l_idx,
6907 SDEB_XA_NOT_IN_USE);
6908 idx = (int)l_idx;
6909 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6910 (niu ? " not_in_use" : ""));
6911 ++j;
6912 }
6913 }
6914 return 0;
6915 }
6916
6917 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6918 {
6919 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6920 }
6921 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6922 * of delay is jiffies.
6923 */
6924 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6925 size_t count)
6926 {
6927 int jdelay, res;
6928
6929 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6930 res = count;
6931 if (sdebug_jdelay != jdelay) {
6932 struct sdebug_host_info *sdhp;
6933
6934 mutex_lock(&sdebug_host_list_mutex);
6935 block_unblock_all_queues(true);
6936
6937 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6938 struct Scsi_Host *shost = sdhp->shost;
6939
6940 if (scsi_host_busy(shost)) {
6941 res = -EBUSY; /* queued commands */
6942 break;
6943 }
6944 }
6945 if (res > 0) {
6946 sdebug_jdelay = jdelay;
6947 sdebug_ndelay = 0;
6948 }
6949 block_unblock_all_queues(false);
6950 mutex_unlock(&sdebug_host_list_mutex);
6951 }
6952 return res;
6953 }
6954 return -EINVAL;
6955 }
6956 static DRIVER_ATTR_RW(delay);
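
/*
 * Example: switch to immediate (in-thread) response completion, per the
 * "0:imm" note in the delay parameter description:
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */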
6957
6958 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6959 {
6960 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6961 }
6962 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6963 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6964 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6965 size_t count)
6966 {
6967 int ndelay, res;
6968
6969 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6970 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6971 res = count;
6972 if (sdebug_ndelay != ndelay) {
6973 struct sdebug_host_info *sdhp;
6974
6975 mutex_lock(&sdebug_host_list_mutex);
6976 block_unblock_all_queues(true);
6977
6978 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6979 struct Scsi_Host *shost = sdhp->shost;
6980
6981 if (scsi_host_busy(shost)) {
6982 res = -EBUSY; /* queued commands */
6983 break;
6984 }
6985 }
6986
6987 if (res > 0) {
6988 sdebug_ndelay = ndelay;
6989 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6990 : DEF_JDELAY;
6991 }
6992 block_unblock_all_queues(false);
6993 mutex_unlock(&sdebug_host_list_mutex);
6994 }
6995 return res;
6996 }
6997 return -EINVAL;
6998 }
6999 static DRIVER_ATTR_RW(ndelay);
7000
7001 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7002 {
7003 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7004 }
7005
7006 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7007 size_t count)
7008 {
7009 int opts;
7010 char work[20];
7011
7012 if (sscanf(buf, "%10s", work) == 1) {
7013 if (strncasecmp(work, "0x", 2) == 0) {
7014 if (kstrtoint(work + 2, 16, &opts) == 0)
7015 goto opts_done;
7016 } else {
7017 if (kstrtoint(work, 10, &opts) == 0)
7018 goto opts_done;
7019 }
7020 }
7021 return -EINVAL;
7022 opts_done:
7023 sdebug_opts = opts;
7024 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7025 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7026 tweak_cmnd_count();
7027 return count;
7028 }
7029 static DRIVER_ATTR_RW(opts);
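
/*
 * opts_store() accepts decimal or "0x"-prefixed hex, so these two are
 * equivalent (medium_err + timeout bits):
 *
 *   echo 6 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 0x6 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */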
7030
7031 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7032 {
7033 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7034 }
7035 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7036 size_t count)
7037 {
7038 int n;
7039
7040 /* Cannot change from or to TYPE_ZBC with sysfs */
7041 if (sdebug_ptype == TYPE_ZBC)
7042 return -EINVAL;
7043
7044 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7045 if (n == TYPE_ZBC)
7046 return -EINVAL;
7047 sdebug_ptype = n;
7048 return count;
7049 }
7050 return -EINVAL;
7051 }
7052 static DRIVER_ATTR_RW(ptype);
7053
7054 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7055 {
7056 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7057 }
7058 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7059 size_t count)
7060 {
7061 int n;
7062
7063 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7064 sdebug_dsense = n;
7065 return count;
7066 }
7067 return -EINVAL;
7068 }
7069 static DRIVER_ATTR_RW(dsense);
7070
7071 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7072 {
7073 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7074 }
7075 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7076 size_t count)
7077 {
7078 int n, idx;
7079
7080 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7081 bool want_store = (n == 0);
7082 struct sdebug_host_info *sdhp;
7083
7084 n = (n > 0);
7085 sdebug_fake_rw = (sdebug_fake_rw > 0);
7086 if (sdebug_fake_rw == n)
7087 return count; /* not transitioning so do nothing */
7088
7089 if (want_store) { /* 1 --> 0 transition, set up store */
7090 if (sdeb_first_idx < 0) {
7091 idx = sdebug_add_store();
7092 if (idx < 0)
7093 return idx;
7094 } else {
7095 idx = sdeb_first_idx;
7096 xa_clear_mark(per_store_ap, idx,
7097 SDEB_XA_NOT_IN_USE);
7098 }
7099 /* make all hosts use same store */
7100 list_for_each_entry(sdhp, &sdebug_host_list,
7101 host_list) {
7102 if (sdhp->si_idx != idx) {
7103 xa_set_mark(per_store_ap, sdhp->si_idx,
7104 SDEB_XA_NOT_IN_USE);
7105 sdhp->si_idx = idx;
7106 }
7107 }
7108 sdeb_most_recent_idx = idx;
7109 } else { /* 0 --> 1 transition is trigger for shrink */
7110 sdebug_erase_all_stores(true /* apart from first */);
7111 }
7112 sdebug_fake_rw = n;
7113 return count;
7114 }
7115 return -EINVAL;
7116 }
7117 static DRIVER_ATTR_RW(fake_rw);
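
/*
 * Note the asymmetry in fake_rw_store(): a 1 -> 0 transition (re)attaches
 * a single shared store to every host, allocating one if needed, while a
 * 0 -> 1 transition erases all stores apart from the first, since faked
 * reads and writes need no backing ram.
 */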
7118
7119 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7120 {
7121 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7122 }
7123 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7124 size_t count)
7125 {
7126 int n;
7127
7128 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7129 sdebug_no_lun_0 = n;
7130 return count;
7131 }
7132 return -EINVAL;
7133 }
7134 static DRIVER_ATTR_RW(no_lun_0);
7135
7136 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7137 {
7138 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7139 }
7140 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7141 size_t count)
7142 {
7143 int n;
7144
7145 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7146 sdebug_num_tgts = n;
7147 sdebug_max_tgts_luns();
7148 return count;
7149 }
7150 return -EINVAL;
7151 }
7152 static DRIVER_ATTR_RW(num_tgts);
7153
7154 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7155 {
7156 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7157 }
7158 static DRIVER_ATTR_RO(dev_size_mb);
7159
7160 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7161 {
7162 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7163 }
7164
7165 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7166 size_t count)
7167 {
7168 bool v;
7169
7170 if (kstrtobool(buf, &v))
7171 return -EINVAL;
7172
7173 sdebug_per_host_store = v;
7174 return count;
7175 }
7176 static DRIVER_ATTR_RW(per_host_store);
7177
7178 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7179 {
7180 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7181 }
7182 static DRIVER_ATTR_RO(num_parts);
7183
7184 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7185 {
7186 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7187 }
7188 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7189 size_t count)
7190 {
7191 int nth;
7192 char work[20];
7193
7194 if (sscanf(buf, "%10s", work) == 1) {
7195 if (strncasecmp(work, "0x", 2) == 0) {
7196 if (kstrtoint(work + 2, 16, &nth) == 0)
7197 goto every_nth_done;
7198 } else {
7199 if (kstrtoint(work, 10, &nth) == 0)
7200 goto every_nth_done;
7201 }
7202 }
7203 return -EINVAL;
7204
7205 every_nth_done:
7206 sdebug_every_nth = nth;
7207 if (nth && !sdebug_statistics) {
7208 pr_info("every_nth needs statistics=1, set it\n");
7209 sdebug_statistics = true;
7210 }
7211 tweak_cmnd_count();
7212 return count;
7213 }
7214 static DRIVER_ATTR_RW(every_nth);
7215
7216 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7217 {
7218 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7219 }
7220 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7221 size_t count)
7222 {
7223 int n;
7224 bool changed;
7225
7226 if (kstrtoint(buf, 0, &n))
7227 return -EINVAL;
7228 if (n >= 0) {
7229 if (n > (int)SAM_LUN_AM_FLAT) {
7230 pr_warn("only LUN address methods 0 and 1 are supported\n");
7231 return -EINVAL;
7232 }
7233 changed = ((int)sdebug_lun_am != n);
7234 sdebug_lun_am = n;
7235 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
7236 struct sdebug_host_info *sdhp;
7237 struct sdebug_dev_info *dp;
7238
7239 mutex_lock(&sdebug_host_list_mutex);
7240 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7241 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7242 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7243 }
7244 }
7245 mutex_unlock(&sdebug_host_list_mutex);
7246 }
7247 return count;
7248 }
7249 return -EINVAL;
7250 }
7251 static DRIVER_ATTR_RW(lun_format);
7252
7253 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7254 {
7255 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7256 }
7257 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7258 size_t count)
7259 {
7260 int n;
7261 bool changed;
7262
7263 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7264 if (n > 256) {
7265 pr_warn("max_luns can be no more than 256\n");
7266 return -EINVAL;
7267 }
7268 changed = (sdebug_max_luns != n);
7269 sdebug_max_luns = n;
7270 sdebug_max_tgts_luns();
7271 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
7272 struct sdebug_host_info *sdhp;
7273 struct sdebug_dev_info *dp;
7274
7275 mutex_lock(&sdebug_host_list_mutex);
7276 list_for_each_entry(sdhp, &sdebug_host_list,
7277 host_list) {
7278 list_for_each_entry(dp, &sdhp->dev_info_list,
7279 dev_list) {
7280 set_bit(SDEBUG_UA_LUNS_CHANGED,
7281 dp->uas_bm);
7282 }
7283 }
7284 mutex_unlock(&sdebug_host_list_mutex);
7285 }
7286 return count;
7287 }
7288 return -EINVAL;
7289 }
7290 static DRIVER_ATTR_RW(max_luns);
7291
7292 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7293 {
7294 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7295 }
7296 /* N.B. max_queue can be changed while there are queued commands. In flight
7297 * commands beyond the new max_queue will be completed. */
7298 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7299 size_t count)
7300 {
7301 int n;
7302
7303 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7304 (n <= SDEBUG_CANQUEUE) &&
7305 (sdebug_host_max_queue == 0)) {
7306 mutex_lock(&sdebug_host_list_mutex);
7307
7308 /* We may only change sdebug_max_queue when we have no shosts */
7309 if (list_empty(&sdebug_host_list))
7310 sdebug_max_queue = n;
7311 else
7312 count = -EBUSY;
7313 mutex_unlock(&sdebug_host_list_mutex);
7314 return count;
7315 }
7316 return -EINVAL;
7317 }
7318 static DRIVER_ATTR_RW(max_queue);
7319
7320 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7321 {
7322 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7323 }
7324
7325 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
7326 {
7327 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
7328 }
7329
7330 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
7331 {
7332 bool v;
7333
7334 if (kstrtobool(buf, &v))
7335 return -EINVAL;
7336
7337 sdebug_no_rwlock = v;
7338 return count;
7339 }
7340 static DRIVER_ATTR_RW(no_rwlock);
7341
7342 /*
7343 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
7344 * in range [0, sdebug_host_max_queue), we can't change it.
7345 */
7346 static DRIVER_ATTR_RO(host_max_queue);
7347
7348 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
7349 {
7350 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
7351 }
7352 static DRIVER_ATTR_RO(no_uld);
7353
7354 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
7355 {
7356 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
7357 }
7358 static DRIVER_ATTR_RO(scsi_level);
7359
7360 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
7361 {
7362 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7363 }
7364 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7365 size_t count)
7366 {
7367 int n;
7368 bool changed;
7369
7370 /* Ignore capacity change for ZBC drives for now */
7371 if (sdeb_zbc_in_use)
7372 return -ENOTSUPP;
7373
7374 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7375 changed = (sdebug_virtual_gb != n);
7376 sdebug_virtual_gb = n;
7377 sdebug_capacity = get_sdebug_capacity();
7378 if (changed) {
7379 struct sdebug_host_info *sdhp;
7380 struct sdebug_dev_info *dp;
7381
7382 mutex_lock(&sdebug_host_list_mutex);
7383 list_for_each_entry(sdhp, &sdebug_host_list,
7384 host_list) {
7385 list_for_each_entry(dp, &sdhp->dev_info_list,
7386 dev_list) {
7387 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7388 dp->uas_bm);
7389 }
7390 }
7391 mutex_unlock(&sdebug_host_list_mutex);
7392 }
7393 return count;
7394 }
7395 return -EINVAL;
7396 }
7397 static DRIVER_ATTR_RW(virtual_gb);
7398
7399 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7400 {
7401 /* absolute number of hosts currently active is what is shown */
7402 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7403 }
7404
7405 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7406 size_t count)
7407 {
7408 bool found;
7409 unsigned long idx;
7410 struct sdeb_store_info *sip;
7411 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7412 int delta_hosts;
7413
7414 if (sscanf(buf, "%d", &delta_hosts) != 1)
7415 return -EINVAL;
7416 if (delta_hosts > 0) {
7417 do {
7418 found = false;
7419 if (want_phs) {
7420 xa_for_each_marked(per_store_ap, idx, sip,
7421 SDEB_XA_NOT_IN_USE) {
7422 sdeb_most_recent_idx = (int)idx;
7423 found = true;
7424 break;
7425 }
7426 if (found) /* re-use case */
7427 sdebug_add_host_helper((int)idx);
7428 else
7429 sdebug_do_add_host(true);
7430 } else {
7431 sdebug_do_add_host(false);
7432 }
7433 } while (--delta_hosts);
7434 } else if (delta_hosts < 0) {
7435 do {
7436 sdebug_do_remove_host(false);
7437 } while (++delta_hosts);
7438 }
7439 return count;
7440 }
7441 static DRIVER_ATTR_RW(add_host);
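
/*
 * Example: add two hosts, then remove the most recently added one:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * With per_host_store set, adding a host first tries to re-use a store
 * marked SDEB_XA_NOT_IN_USE before allocating a new one.
 */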
7442
7443 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7444 {
7445 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7446 }
7447 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7448 size_t count)
7449 {
7450 int n;
7451
7452 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7453 sdebug_vpd_use_hostno = n;
7454 return count;
7455 }
7456 return -EINVAL;
7457 }
7458 static DRIVER_ATTR_RW(vpd_use_hostno);
7459
7460 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7461 {
7462 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7463 }
7464 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7465 size_t count)
7466 {
7467 int n;
7468
7469 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7470 if (n > 0)
7471 sdebug_statistics = true;
7472 else {
7473 clear_queue_stats();
7474 sdebug_statistics = false;
7475 }
7476 return count;
7477 }
7478 return -EINVAL;
7479 }
7480 static DRIVER_ATTR_RW(statistics);
7481
7482 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7483 {
7484 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7485 }
7486 static DRIVER_ATTR_RO(sector_size);
7487
7488 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7489 {
7490 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7491 }
7492 static DRIVER_ATTR_RO(submit_queues);
7493
7494 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7495 {
7496 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7497 }
7498 static DRIVER_ATTR_RO(dix);
7499
7500 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7501 {
7502 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7503 }
7504 static DRIVER_ATTR_RO(dif);
7505
7506 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7507 {
7508 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7509 }
7510 static DRIVER_ATTR_RO(guard);
7511
7512 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7513 {
7514 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7515 }
7516 static DRIVER_ATTR_RO(ato);
7517
7518 static ssize_t map_show(struct device_driver *ddp, char *buf)
7519 {
7520 ssize_t count = 0;
7521
7522 if (!scsi_debug_lbp())
7523 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7524 sdebug_store_sectors);
7525
7526 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7527 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7528
7529 if (sip)
7530 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7531 (int)map_size, sip->map_storep);
7532 }
7533 buf[count++] = '\n';
7534 buf[count] = '\0';
7535
7536 return count;
7537 }
7538 static DRIVER_ATTR_RO(map);
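
/*
 * The "%*pbl" format above prints the provisioning bitmap as a list of
 * block ranges, e.g. "0-1,32-47" when only those blocks are mapped.
 */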
7539
7540 static ssize_t random_show(struct device_driver *ddp, char *buf)
7541 {
7542 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7543 }
7544
7545 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7546 size_t count)
7547 {
7548 bool v;
7549
7550 if (kstrtobool(buf, &v))
7551 return -EINVAL;
7552
7553 sdebug_random = v;
7554 return count;
7555 }
7556 static DRIVER_ATTR_RW(random);
7557
7558 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7559 {
7560 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7561 }
7562 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7563 size_t count)
7564 {
7565 int n;
7566
7567 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7568 sdebug_removable = (n > 0);
7569 return count;
7570 }
7571 return -EINVAL;
7572 }
7573 static DRIVER_ATTR_RW(removable);
7574
7575 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7576 {
7577 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7578 }
7579 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7580 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7581 size_t count)
7582 {
7583 int n;
7584
7585 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7586 sdebug_host_lock = (n > 0);
7587 return count;
7588 }
7589 return -EINVAL;
7590 }
7591 static DRIVER_ATTR_RW(host_lock);
7592
7593 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7594 {
7595 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7596 }
7597 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7598 size_t count)
7599 {
7600 int n;
7601
7602 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7603 sdebug_strict = (n > 0);
7604 return count;
7605 }
7606 return -EINVAL;
7607 }
7608 static DRIVER_ATTR_RW(strict);
7609
7610 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7611 {
7612 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7613 }
7614 static DRIVER_ATTR_RO(uuid_ctl);
7615
7616 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7617 {
7618 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7619 }
7620 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7621 size_t count)
7622 {
7623 int ret, n;
7624
7625 ret = kstrtoint(buf, 0, &n);
7626 if (ret)
7627 return ret;
7628 sdebug_cdb_len = n;
7629 all_config_cdb_len();
7630 return count;
7631 }
7632 static DRIVER_ATTR_RW(cdb_len);
7633
7634 static const char * const zbc_model_strs_a[] = {
7635 [BLK_ZONED_NONE] = "none",
7636 [BLK_ZONED_HA] = "host-aware",
7637 [BLK_ZONED_HM] = "host-managed",
7638 };
7639
7640 static const char * const zbc_model_strs_b[] = {
7641 [BLK_ZONED_NONE] = "no",
7642 [BLK_ZONED_HA] = "aware",
7643 [BLK_ZONED_HM] = "managed",
7644 };
7645
7646 static const char * const zbc_model_strs_c[] = {
7647 [BLK_ZONED_NONE] = "0",
7648 [BLK_ZONED_HA] = "1",
7649 [BLK_ZONED_HM] = "2",
7650 };
7651
7652 static int sdeb_zbc_model_str(const char *cp)
7653 {
7654 int res = sysfs_match_string(zbc_model_strs_a, cp);
7655
7656 if (res < 0) {
7657 res = sysfs_match_string(zbc_model_strs_b, cp);
7658 if (res < 0) {
7659 res = sysfs_match_string(zbc_model_strs_c, cp);
7660 if (res < 0)
7661 return -EINVAL;
7662 }
7663 }
7664 return res;
7665 }
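
/*
 * The three model tables are indexed identically, so for example
 * "host-managed", "managed" and "2" all resolve to BLK_ZONED_HM.
 */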
7666
7667 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7668 {
7669 return scnprintf(buf, PAGE_SIZE, "%s\n",
7670 zbc_model_strs_a[sdeb_zbc_model]);
7671 }
7672 static DRIVER_ATTR_RO(zbc);
7673
7674 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7675 {
7676 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7677 }
7678 static DRIVER_ATTR_RO(tur_ms_to_ready);
7679
7680 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7681 {
7682 char *p = buf, *end = buf + PAGE_SIZE;
7683 int i;
7684
7685 for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7686 p += scnprintf(p, end - p, "%d %ld\n", i,
7687 atomic_long_read(&writes_by_group_number[i]));
7688
7689 return p - buf;
7690 }
7691
7692 static ssize_t group_number_stats_store(struct device_driver *ddp,
7693 const char *buf, size_t count)
7694 {
7695 int i;
7696
7697 for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7698 atomic_long_set(&writes_by_group_number[i], 0);
7699
7700 return count;
7701 }
7702 static DRIVER_ATTR_RW(group_number_stats);
7703
7704 /* Note: The following array creates attribute files in the
7705 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7706 files (over those found in the /sys/module/scsi_debug/parameters
7707 directory) is that auxiliary actions can be triggered when an attribute
7708 is changed. For example see: add_host_store() above.
7709 */
7710
7711 static struct attribute *sdebug_drv_attrs[] = {
7712 &driver_attr_delay.attr,
7713 &driver_attr_opts.attr,
7714 &driver_attr_ptype.attr,
7715 &driver_attr_dsense.attr,
7716 &driver_attr_fake_rw.attr,
7717 &driver_attr_host_max_queue.attr,
7718 &driver_attr_no_lun_0.attr,
7719 &driver_attr_num_tgts.attr,
7720 &driver_attr_dev_size_mb.attr,
7721 &driver_attr_num_parts.attr,
7722 &driver_attr_every_nth.attr,
7723 &driver_attr_lun_format.attr,
7724 &driver_attr_max_luns.attr,
7725 &driver_attr_max_queue.attr,
7726 &driver_attr_no_rwlock.attr,
7727 &driver_attr_no_uld.attr,
7728 &driver_attr_scsi_level.attr,
7729 &driver_attr_virtual_gb.attr,
7730 &driver_attr_add_host.attr,
7731 &driver_attr_per_host_store.attr,
7732 &driver_attr_vpd_use_hostno.attr,
7733 &driver_attr_sector_size.attr,
7734 &driver_attr_statistics.attr,
7735 &driver_attr_submit_queues.attr,
7736 &driver_attr_dix.attr,
7737 &driver_attr_dif.attr,
7738 &driver_attr_guard.attr,
7739 &driver_attr_ato.attr,
7740 &driver_attr_map.attr,
7741 &driver_attr_random.attr,
7742 &driver_attr_removable.attr,
7743 &driver_attr_host_lock.attr,
7744 &driver_attr_ndelay.attr,
7745 &driver_attr_strict.attr,
7746 &driver_attr_uuid_ctl.attr,
7747 &driver_attr_cdb_len.attr,
7748 &driver_attr_tur_ms_to_ready.attr,
7749 &driver_attr_zbc.attr,
7750 &driver_attr_group_number_stats.attr,
7751 NULL,
7752 };
7753 ATTRIBUTE_GROUPS(sdebug_drv);
7754
7755 static struct device *pseudo_primary;
7756
7757 static int __init scsi_debug_init(void)
7758 {
7759 bool want_store = (sdebug_fake_rw == 0);
7760 unsigned long sz;
7761 int k, ret, hosts_to_add;
7762 int idx = -1;
7763
7764 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7765 pr_warn("ndelay must be less than 1 second, ignored\n");
7766 sdebug_ndelay = 0;
7767 } else if (sdebug_ndelay > 0)
7768 sdebug_jdelay = JDELAY_OVERRIDDEN;
7769
7770 switch (sdebug_sector_size) {
7771 case 512:
7772 case 1024:
7773 case 2048:
7774 case 4096:
7775 break;
7776 default:
7777 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7778 return -EINVAL;
7779 }
7780
7781 switch (sdebug_dif) {
7782 case T10_PI_TYPE0_PROTECTION:
7783 break;
7784 case T10_PI_TYPE1_PROTECTION:
7785 case T10_PI_TYPE2_PROTECTION:
7786 case T10_PI_TYPE3_PROTECTION:
7787 have_dif_prot = true;
7788 break;
7789
7790 default:
7791 pr_err("dif must be 0, 1, 2 or 3\n");
7792 return -EINVAL;
7793 }
7794
7795 if (sdebug_num_tgts < 0) {
7796 pr_err("num_tgts must be >= 0\n");
7797 return -EINVAL;
7798 }
7799
7800 if (sdebug_guard > 1) {
7801 pr_err("guard must be 0 or 1\n");
7802 return -EINVAL;
7803 }
7804
7805 if (sdebug_ato > 1) {
7806 pr_err("ato must be 0 or 1\n");
7807 return -EINVAL;
7808 }
7809
7810 if (sdebug_physblk_exp > 15) {
7811 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7812 return -EINVAL;
7813 }
7814
7815 sdebug_lun_am = sdebug_lun_am_i;
7816 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7817 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7818 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7819 }
7820
7821 if (sdebug_max_luns > 256) {
7822 if (sdebug_max_luns > 16384) {
7823 pr_warn("max_luns can be no more than 16384, use default\n");
7824 sdebug_max_luns = DEF_MAX_LUNS;
7825 }
7826 sdebug_lun_am = SAM_LUN_AM_FLAT;
7827 }
7828
7829 if (sdebug_lowest_aligned > 0x3fff) {
7830 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7831 return -EINVAL;
7832 }
7833
7834 if (submit_queues < 1) {
7835 pr_err("submit_queues must be 1 or more\n");
7836 return -EINVAL;
7837 }
7838
7839 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7840 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7841 return -EINVAL;
7842 }
7843
7844 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7845 (sdebug_host_max_queue < 0)) {
7846 pr_err("host_max_queue must be in range [0 %d]\n",
7847 SDEBUG_CANQUEUE);
7848 return -EINVAL;
7849 }
7850
7851 if (sdebug_host_max_queue &&
7852 (sdebug_max_queue != sdebug_host_max_queue)) {
7853 sdebug_max_queue = sdebug_host_max_queue;
7854 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7855 sdebug_max_queue);
7856 }
7857
7858 /*
7859 * check for host managed zoned block device specified with
7860 * ptype=0x14 or zbc=XXX.
7861 */
7862 if (sdebug_ptype == TYPE_ZBC) {
7863 sdeb_zbc_model = BLK_ZONED_HM;
7864 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7865 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7866 if (k < 0)
7867 return k;
7868 sdeb_zbc_model = k;
7869 switch (sdeb_zbc_model) {
7870 case BLK_ZONED_NONE:
7871 case BLK_ZONED_HA:
7872 sdebug_ptype = TYPE_DISK;
7873 break;
7874 case BLK_ZONED_HM:
7875 sdebug_ptype = TYPE_ZBC;
7876 break;
7877 default:
7878 pr_err("Invalid ZBC model\n");
7879 return -EINVAL;
7880 }
7881 }
7882 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7883 sdeb_zbc_in_use = true;
7884 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7885 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7886 }
7887
7888 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7889 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7890 if (sdebug_dev_size_mb < 1)
7891 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7892 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7893 sdebug_store_sectors = sz / sdebug_sector_size;
7894 sdebug_capacity = get_sdebug_capacity();
7895
7896 /* play around with geometry, don't waste too much on track 0 */
7897 sdebug_heads = 8;
7898 sdebug_sectors_per = 32;
7899 if (sdebug_dev_size_mb >= 256)
7900 sdebug_heads = 64;
7901 else if (sdebug_dev_size_mb >= 16)
7902 sdebug_heads = 32;
7903 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7904 (sdebug_sectors_per * sdebug_heads);
7905 if (sdebug_cylinders_per >= 1024) {
7906 /* other LLDs do this; implies >= 1GB ram disk ... */
7907 sdebug_heads = 255;
7908 sdebug_sectors_per = 63;
7909 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7910 (sdebug_sectors_per * sdebug_heads);
7911 }
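	/* e.g. a 1 GiB store with 512-byte sectors: 2097152 / (255 * 63) -> 130 cylinders */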
7912 if (scsi_debug_lbp()) {
7913 sdebug_unmap_max_blocks =
7914 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7915
7916 sdebug_unmap_max_desc =
7917 clamp(sdebug_unmap_max_desc, 0U, 256U);
7918
7919 sdebug_unmap_granularity =
7920 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7921
7922 if (sdebug_unmap_alignment &&
7923 sdebug_unmap_granularity <=
7924 sdebug_unmap_alignment) {
7925 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7926 return -EINVAL;
7927 }
7928 }
7929
7930 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7931 if (want_store) {
7932 idx = sdebug_add_store();
7933 if (idx < 0)
7934 return idx;
7935 }
7936
7937 pseudo_primary = root_device_register("pseudo_0");
7938 if (IS_ERR(pseudo_primary)) {
7939 pr_warn("root_device_register() error\n");
7940 ret = PTR_ERR(pseudo_primary);
7941 goto free_vm;
7942 }
7943 ret = bus_register(&pseudo_lld_bus);
7944 if (ret < 0) {
7945 pr_warn("bus_register error: %d\n", ret);
7946 goto dev_unreg;
7947 }
7948 ret = driver_register(&sdebug_driverfs_driver);
7949 if (ret < 0) {
7950 pr_warn("driver_register error: %d\n", ret);
7951 goto bus_unreg;
7952 }
7953
7954 hosts_to_add = sdebug_add_host;
7955 sdebug_add_host = 0;
7956
7957 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7958 if (!queued_cmd_cache) {
7959 ret = -ENOMEM;
7960 goto driver_unreg;
7961 }
7962
7963 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7964 if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7965 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7966
7967 for (k = 0; k < hosts_to_add; k++) {
7968 if (want_store && k == 0) {
7969 ret = sdebug_add_host_helper(idx);
7970 if (ret < 0) {
7971 pr_err("add_host_helper k=%d, error=%d\n",
7972 k, -ret);
7973 break;
7974 }
7975 } else {
7976 ret = sdebug_do_add_host(want_store &&
7977 sdebug_per_host_store);
7978 if (ret < 0) {
7979 pr_err("add_host k=%d error=%d\n", k, -ret);
7980 break;
7981 }
7982 }
7983 }
7984 if (sdebug_verbose)
7985 pr_info("built %d host(s)\n", sdebug_num_hosts);
7986
7987 return 0;
7988
7989 driver_unreg:
7990 driver_unregister(&sdebug_driverfs_driver);
7991 bus_unreg:
7992 bus_unregister(&pseudo_lld_bus);
7993 dev_unreg:
7994 root_device_unregister(pseudo_primary);
7995 free_vm:
7996 sdebug_erase_store(idx, NULL);
7997 return ret;
7998 }
7999
8000 static void __exit scsi_debug_exit(void)
8001 {
8002 int k = sdebug_num_hosts;
8003
8004 for (; k; k--)
8005 sdebug_do_remove_host(true);
8006 kmem_cache_destroy(queued_cmd_cache);
8007 driver_unregister(&sdebug_driverfs_driver);
8008 bus_unregister(&pseudo_lld_bus);
8009 root_device_unregister(pseudo_primary);
8010
8011 sdebug_erase_all_stores(false);
8012 xa_destroy(per_store_ap);
8013 debugfs_remove(sdebug_debugfs_root);
8014 }
8015
8016 device_initcall(scsi_debug_init);
8017 module_exit(scsi_debug_exit);
8018
8019 static void sdebug_release_adapter(struct device *dev)
8020 {
8021 struct sdebug_host_info *sdbg_host;
8022
8023 sdbg_host = dev_to_sdebug_host(dev);
8024 kfree(sdbg_host);
8025 }
8026
8027 /* idx must be valid, if sip is NULL then it will be obtained using idx */
8028 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8029 {
8030 if (idx < 0)
8031 return;
8032 if (!sip) {
8033 if (xa_empty(per_store_ap))
8034 return;
8035 sip = xa_load(per_store_ap, idx);
8036 if (!sip)
8037 return;
8038 }
8039 vfree(sip->map_storep);
8040 vfree(sip->dif_storep);
8041 vfree(sip->storep);
8042 xa_erase(per_store_ap, idx);
8043 kfree(sip);
8044 }
8045
8046 /* Assume apart_from_first==false only in shutdown case. */
8047 static void sdebug_erase_all_stores(bool apart_from_first)
8048 {
8049 unsigned long idx;
8050 struct sdeb_store_info *sip = NULL;
8051
8052 xa_for_each(per_store_ap, idx, sip) {
8053 if (apart_from_first)
8054 apart_from_first = false;
8055 else
8056 sdebug_erase_store(idx, sip);
8057 }
8058 if (apart_from_first)
8059 sdeb_most_recent_idx = sdeb_first_idx;
8060 }
8061
8062 /*
8063 * Returns store xarray new element index (idx) if >=0 else negated errno.
8064 * Limit the number of stores to 65536.
8065 */
8066 static int sdebug_add_store(void)
8067 {
8068 int res;
8069 u32 n_idx;
8070 unsigned long iflags;
8071 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8072 struct sdeb_store_info *sip = NULL;
8073 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8074
8075 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8076 if (!sip)
8077 return -ENOMEM;
8078
8079 xa_lock_irqsave(per_store_ap, iflags);
8080 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8081 if (unlikely(res < 0)) {
8082 xa_unlock_irqrestore(per_store_ap, iflags);
8083 kfree(sip);
8084 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8085 return res;
8086 }
8087 sdeb_most_recent_idx = n_idx;
8088 if (sdeb_first_idx < 0)
8089 sdeb_first_idx = n_idx;
8090 xa_unlock_irqrestore(per_store_ap, iflags);
8091
8092 res = -ENOMEM;
8093 sip->storep = vzalloc(sz);
8094 if (!sip->storep) {
8095 pr_err("user data oom\n");
8096 goto err;
8097 }
8098 if (sdebug_num_parts > 0)
8099 sdebug_build_parts(sip->storep, sz);
8100
8101 /* DIF/DIX: what T10 calls Protection Information (PI) */
8102 if (sdebug_dix) {
8103 int dif_size;
8104
8105 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8106 sip->dif_storep = vmalloc(dif_size);
8107
8108 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
8109 sip->dif_storep);
8110
8111 if (!sip->dif_storep) {
8112 pr_err("DIX oom\n");
8113 goto err;
8114 }
8115 memset(sip->dif_storep, 0xff, dif_size);
8116 }
8117 /* Logical Block Provisioning */
8118 if (scsi_debug_lbp()) {
8119 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8120 sip->map_storep = vmalloc(array_size(sizeof(long),
8121 BITS_TO_LONGS(map_size)));
8122
8123 pr_info("%lu provisioning blocks\n", map_size);
8124
8125 if (!sip->map_storep) {
8126 pr_err("LBP map oom\n");
8127 goto err;
8128 }
8129
8130 bitmap_zero(sip->map_storep, map_size);
8131
8132 /* Map first 1KB for partition table */
8133 if (sdebug_num_parts)
8134 map_region(sip, 0, 2);
8135 }
8136
8137 rwlock_init(&sip->macc_data_lck);
8138 rwlock_init(&sip->macc_meta_lck);
8139 rwlock_init(&sip->macc_sector_lck);
8140 return (int)n_idx;
8141 err:
8142 sdebug_erase_store((int)n_idx, sip);
8143 pr_warn("%s: failed, errno=%d\n", __func__, -res);
8144 return res;
8145 }
8146
8147 static int sdebug_add_host_helper(int per_host_idx)
8148 {
8149 int k, devs_per_host, idx;
8150 int error = -ENOMEM;
8151 struct sdebug_host_info *sdbg_host;
8152 struct sdebug_dev_info *sdbg_devinfo, *tmp;
8153
8154 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
8155 if (!sdbg_host)
8156 return -ENOMEM;
8157 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
8158 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
8159 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8160 sdbg_host->si_idx = idx;
8161
8162 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
8163
8164 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
8165 for (k = 0; k < devs_per_host; k++) {
8166 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
8167 if (!sdbg_devinfo)
8168 goto clean;
8169 }
8170
8171 mutex_lock(&sdebug_host_list_mutex);
8172 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
8173 mutex_unlock(&sdebug_host_list_mutex);
8174
8175 sdbg_host->dev.bus = &pseudo_lld_bus;
8176 sdbg_host->dev.parent = pseudo_primary;
8177 sdbg_host->dev.release = &sdebug_release_adapter;
8178 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
8179
8180 error = device_register(&sdbg_host->dev);
8181 if (error) {
8182 mutex_lock(&sdebug_host_list_mutex);
8183 list_del(&sdbg_host->host_list);
8184 mutex_unlock(&sdebug_host_list_mutex);
8185 goto clean;
8186 }
8187
8188 ++sdebug_num_hosts;
8189 return 0;
8190
8191 clean:
8192 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8193 dev_list) {
8194 list_del(&sdbg_devinfo->dev_list);
8195 kfree(sdbg_devinfo->zstate);
8196 kfree(sdbg_devinfo);
8197 }
8198 if (sdbg_host->dev.release)
8199 put_device(&sdbg_host->dev);
8200 else
8201 kfree(sdbg_host);
8202 pr_warn("%s: failed, errno=%d\n", __func__, -error);
8203 return error;
8204 }
8205
8206 static int sdebug_do_add_host(bool mk_new_store)
8207 {
8208 int ph_idx = sdeb_most_recent_idx;
8209
8210 if (mk_new_store) {
8211 ph_idx = sdebug_add_store();
8212 if (ph_idx < 0)
8213 return ph_idx;
8214 }
8215 return sdebug_add_host_helper(ph_idx);
8216 }
8217
8218 static void sdebug_do_remove_host(bool the_end)
8219 {
8220 int idx = -1;
8221 struct sdebug_host_info *sdbg_host = NULL;
8222 struct sdebug_host_info *sdbg_host2;
8223
8224 mutex_lock(&sdebug_host_list_mutex);
8225 if (!list_empty(&sdebug_host_list)) {
8226 sdbg_host = list_entry(sdebug_host_list.prev,
8227 struct sdebug_host_info, host_list);
8228 idx = sdbg_host->si_idx;
8229 }
8230 if (!the_end && idx >= 0) {
8231 bool unique = true;
8232
8233 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
8234 if (sdbg_host2 == sdbg_host)
8235 continue;
8236 if (idx == sdbg_host2->si_idx) {
8237 unique = false;
8238 break;
8239 }
8240 }
8241 if (unique) {
8242 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
8243 if (idx == sdeb_most_recent_idx)
8244 --sdeb_most_recent_idx;
8245 }
8246 }
8247 if (sdbg_host)
8248 list_del(&sdbg_host->host_list);
8249 mutex_unlock(&sdebug_host_list_mutex);
8250
8251 if (!sdbg_host)
8252 return;
8253
8254 device_unregister(&sdbg_host->dev);
8255 --sdebug_num_hosts;
8256 }
8257
8258 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8259 {
8260 struct sdebug_dev_info *devip = sdev->hostdata;
8261
8262 if (!devip)
8263 return -ENODEV;
8264
8265 mutex_lock(&sdebug_host_list_mutex);
8266 block_unblock_all_queues(true);
8267
8268 if (qdepth > SDEBUG_CANQUEUE) {
8269 qdepth = SDEBUG_CANQUEUE;
8270 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8271 qdepth, SDEBUG_CANQUEUE);
8272 }
8273 if (qdepth < 1)
8274 qdepth = 1;
8275 if (qdepth != sdev->queue_depth)
8276 scsi_change_queue_depth(sdev, qdepth);
8277
8278 block_unblock_all_queues(false);
8279 mutex_unlock(&sdebug_host_list_mutex);
8280
8281 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8282 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8283
8284 return sdev->queue_depth;
8285 }
8286
8287 static bool fake_timeout(struct scsi_cmnd *scp)
8288 {
8289 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8290 if (sdebug_every_nth < -1)
8291 sdebug_every_nth = -1;
8292 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8293 return true; /* ignore command causing timeout */
8294 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8295 scsi_medium_access_command(scp))
8296 return true; /* time out reads and writes */
8297 }
8298 return false;
8299 }
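
/*
 * For example, with every_nth=100 and the timeout bit set in opts, every
 * 100th command is silently dropped so that the mid level's error
 * handling (aborts, then escalating resets) gets exercised.
 */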
8300
8301 /* Response to TUR or media access command when device stopped */
8302 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
8303 {
8304 int stopped_state;
8305 u64 diff_ns = 0;
8306 ktime_t now_ts = ktime_get_boottime();
8307 struct scsi_device *sdp = scp->device;
8308
8309 stopped_state = atomic_read(&devip->stopped);
8310 if (stopped_state == 2) {
8311 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
8312 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
8313 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
8314 /* tur_ms_to_ready timer extinguished */
8315 atomic_set(&devip->stopped, 0);
8316 return 0;
8317 }
8318 }
8319 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
8320 if (sdebug_verbose)
8321 sdev_printk(KERN_INFO, sdp,
8322 "%s: Not ready: in process of becoming ready\n", my_name);
8323 if (scp->cmnd[0] == TEST_UNIT_READY) {
8324 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
8325
8326 if (diff_ns <= tur_nanosecs_to_ready)
8327 diff_ns = tur_nanosecs_to_ready - diff_ns;
8328 else
8329 diff_ns = tur_nanosecs_to_ready;
8330 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
8331 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
8332 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
8333 diff_ns);
8334 return check_condition_result;
8335 }
8336 }
8337 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
8338 if (sdebug_verbose)
8339 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
8340 my_name);
8341 return check_condition_result;
8342 }
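
/*
 * So until tur_ms_to_ready expires, a TEST UNIT READY gets a NOT READY,
 * "in process of becoming ready" sense with the remaining delay (in
 * milliseconds) placed in the sense-data information field.
 */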
8343
8344 static void sdebug_map_queues(struct Scsi_Host *shost)
8345 {
8346 int i, qoff;
8347
8348 if (shost->nr_hw_queues == 1)
8349 return;
8350
8351 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
8352 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
8353
8354 map->nr_queues = 0;
8355
8356 if (i == HCTX_TYPE_DEFAULT)
8357 map->nr_queues = submit_queues - poll_queues;
8358 else if (i == HCTX_TYPE_POLL)
8359 map->nr_queues = poll_queues;
8360
8361 if (!map->nr_queues) {
8362 BUG_ON(i == HCTX_TYPE_DEFAULT);
8363 continue;
8364 }
8365
8366 map->queue_offset = qoff;
8367 blk_mq_map_queues(map);
8368
8369 qoff += map->nr_queues;
8370 }
8371 }
8372
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to
 * have aborted polled commands from schedule_resp().
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	struct sdebug_queued_cmd *sqcp;
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sqcp = TO_QUEUED_CMD(cmd);
	if (!sqcp) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	sd_dp = &sqcp->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	ASSIGN_QUEUED_CMD(cmd, NULL);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	sdebug_free_queued_cmd(sqcp);

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}

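/*
 * ->mq_poll callback: walk the tag set and complete every command on this
 * hardware queue that was deferred with SDEB_DEFER_POLL and whose
 * completion timestamp has passed. Returns the number of completions.
 */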
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

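/*
 * The error-injection helpers below share one counting convention:
 * err->cmd == 0xff matches any opcode, err->cnt < 0 arms a finite number
 * of injections (cnt is incremented toward zero on each hit), cnt > 0
 * injects on every matching command, and cnt == 0 disables the entry.
 */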
static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_TMOUT_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

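/*
 * ERR_FAIL_QUEUE_CMD: fail the command at queuecommand level by returning
 * the user-supplied queuecmd_ret (for example SCSI_MLQUEUE_HOST_BUSY)
 * instead of queueing a response.
 */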
static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_QUEUE_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = err->cnt ? err->queuecmd_ret : 0;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

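/*
 * ERR_FAIL_CMD: complete the command with the injected status, host and
 * driver bytes plus a sense buffer built from the injected sense_key,
 * asc and asq. A snapshot of the entry is copied to *info so the caller
 * can log it after the RCU read side has been dropped.
 */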
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}

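/*
 * Main ->queuecommand entry point. In outline: decode the opcode via
 * opcode_ind_arr[]/opcode_info_arr[] (matching the service action where
 * one exists), apply any injected timeouts or failures, optionally check
 * the CDB against the opcode's length mask (sdebug_strict), report
 * pending unit attentions and not-ready states, then hand the chosen
 * resp_* handler to schedule_resp() with the configured delay.
 */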
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
			    opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			    "fail command 0x%x with hostbyte=0x%x, "
			    "driverbyte=0x%x, statusbyte=0x%x, "
			    "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			    opcode, err.host_byte, err.driver_byte,
			    err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {	/* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

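/*
 * Per-command private data init; the lock serializes access to the
 * queued-command state attached to each scsi_cmnd.
 */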
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	spin_lock_init(&sdsc->lock);

	return 0;
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler =	scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv =	sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};

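/*
 * Bus probe: allocate and configure a Scsi_Host for one simulated host,
 * set up the default/poll queue split (nr_maps = 3 exposes the
 * HCTX_TYPE_POLL map), advertise the configured DIF/DIX protection
 * capabilities, then add and scan the host.
 */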
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell the scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O. If that condition is not met,
	 * trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

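/* Bus remove: tear down the Scsi_Host and free per-device state. */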
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

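/*
 * The pseudo bus that every simulated host hangs off; the probe/remove
 * callbacks above run as hosts are added and removed.
 */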
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};