// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
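/*
 * Illustrative sketch (not part of the dispatch path itself): because
 * lower UA numbers have higher priority, scanning the per-device bitmap
 * with find_first_bit() naturally yields the most urgent pending UA:
 *
 *	int k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	if (k != SDEBUG_NUM_UAS)
 *		report UA number k, then clear_bit(k, devip->uas_bm);
 *
 * make_ua() below follows exactly this pattern.
 */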

/* when the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is
 * simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by the
 * max_queue option. Command responses are not queued when jdelay=0 and
 * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is the bits in one long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
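/*
 * Worked example: on a 64-bit build, SDEBUG_CANQUEUE =
 * SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG = 3 * 64 = 192 queued commands
 * per submit queue; a 32-bit build would give 3 * 32 = 96.
 */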

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
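/*
 * Usage sketch (illustrative only): the mid-layer's host_scribble
 * pointer carries the per-command queued state, e.g.:
 *
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 *
 *	ASSIGN_QUEUED_CMD(scmd, NULL);	(detach before freeing sqcp)
 */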

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)
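/*
 * Usage sketch (illustrative only): recover the per-host state from a
 * mid-layer handle via the container_of()-based helpers above, then walk
 * its device list:
 *
 *	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(shost);
 *	struct sdebug_dev_info *devip;
 *
 *	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list)
 *		...
 */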

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
	spinlock_t   lock;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of the F_* flags above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
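/*
 * Example lookup (illustrative): a READ(10) cdb starts with 0x28, so
 * opcode_ind_arr[0x28] == SDEB_I_READ, which in turn indexes
 * opcode_info_arr[] further below; opcodes with no entry map to index 0
 * (SDEB_I_INVALID_OPCODE).
 */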

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can OR their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
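/*
 * Example (sketch): a response function that honours an IMMED bit can
 * signal early completion with
 *
 *	return res | SDEG_RES_IMMED_MASK;
 *
 * so the command is completed without the full jdelay/ndelay wait.
 */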

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
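/*
 * Traversal sketch (illustrative only, not the dispatch code itself):
 * given idx = opcode_ind_arr[cdb[0]], the main entry is
 * opcode_info_arr[idx]; if its opcode or service action does not match
 * the cdb and num_attached > 0, the matching variant is looked up in the
 * attached overflow array:
 *
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *	int k;
 *
 *	for (k = 0; k < oip->num_attached; ++k)
 *		if (oip->arrp[k].opcode == cdb[0] ...)
 *			use &oip->arrp[k];
 */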

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_max_segment_size = BLK_MAX_SEGMENT_SIZE;
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, the kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
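/*
 * Wrap-around example (illustrative): do_div() leaves the remainder in
 * 'lba', so with sdebug_store_sectors == 2048 a request for LBA 5000
 * maps to fake-store sector 5000 % 2048 == 904. This lets a small RAM
 * store back a larger virtual capacity (see sdebug_virtual_gb).
 */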

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
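/*
 * Example result (assuming fixed-format sense, i.e. sdebug_dsense=0):
 * for an invalid field at cdb byte 2, bit 7, the SENSE KEY SPECIFIC
 * bytes at offset 15 become 0xcf 0x00 0x02 (SKSV=1, C/D=1, BPV=1,
 * bit pointer=7, field pointer=2).
 */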

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
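/*
 * Resid example (illustrative): with scsi_bufflen() == 512, a call that
 * copies arr_len=100 bytes at off_dst=0 computes n = 512 - 100 = 412 and
 * takes the minimum with the current resid, so resid shrinks toward the
 * highest offset written even when calls arrive out of order.
 */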

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
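/*
 * Designator header sketch (per SPC-4, for reference): each descriptor
 * built above starts with 4 header bytes: protocol id/code set,
 * PIV/association/designator type, a reserved byte, and the designator
 * length. For example 0x61 0x93 0x00 0x08 introduces an 8-byte NAA
 * target port identifier (protocol 6 = SAS, code set 1 = binary,
 * PIV=1, association = target port, type 3 = NAA).
 */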

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
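/*
 * Padding arithmetic example: na1 ("https://www.kernel.org/config") is
 * 29 bytes, so olen=29 and plen=30, which rounds up to the next multiple
 * of 4 giving plen=32; the memset() writes 3 zero bytes, the first of
 * which acts as the NUL terminator.
 */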
1358 
1359 /* SCSI ports VPD page */
inquiry_vpd_88(unsigned char * arr,int target_dev_id)1360 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1361 {
1362 	int num = 0;
1363 	int port_a, port_b;
1364 
1365 	port_a = target_dev_id + 1;
1366 	port_b = port_a + 1;
1367 	arr[num++] = 0x0;	/* reserved */
1368 	arr[num++] = 0x0;	/* reserved */
1369 	arr[num++] = 0x0;
1370 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1371 	memset(arr + num, 0, 6);
1372 	num += 6;
1373 	arr[num++] = 0x0;
1374 	arr[num++] = 12;	/* length tp descriptor */
1375 	/* naa-5 target port identifier (A) */
1376 	arr[num++] = 0x61;	/* proto=sas, binary */
1377 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x8;	/* length */
1380 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1381 	num += 8;
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x0;	/* reserved */
1384 	arr[num++] = 0x0;
1385 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1386 	memset(arr + num, 0, 6);
1387 	num += 6;
1388 	arr[num++] = 0x0;
1389 	arr[num++] = 12;	/* length tp descriptor */
1390 	/* naa-5 target port identifier (B) */
1391 	arr[num++] = 0x61;	/* proto=sas, binary */
1392 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x8;	/* length */
1395 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1396 	num += 8;
1397 
1398 	return num;
1399 }
1400 
1401 
1402 static unsigned char vpd89_data[] = {
1403 /* from 4th byte */ 0,0,0,0,
1404 'l','i','n','u','x',' ',' ',' ',
1405 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1406 '1','2','3','4',
1407 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1408 0xec,0,0,0,
1409 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1410 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1412 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1413 0x53,0x41,
1414 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1415 0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1417 0x10,0x80,
1418 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1419 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1420 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1422 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1423 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1424 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1429 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1430 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1431 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1444 };
1445 
1446 /* ATA Information VPD page */
1447 static int inquiry_vpd_89(unsigned char *arr)
1448 {
1449 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1450 	return sizeof(vpd89_data);
1451 }
1452 
1453 
1454 static unsigned char vpdb0_data[] = {
1455 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1456 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1458 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1459 };
1460 
1461 /* Block limits VPD page (SBC-3) */
1462 static int inquiry_vpd_b0(unsigned char *arr)
1463 {
1464 	unsigned int gran;
1465 
1466 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1467 
1468 	/* Optimal transfer length granularity */
1469 	if (sdebug_opt_xferlen_exp != 0 &&
1470 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1471 		gran = 1 << sdebug_opt_xferlen_exp;
1472 	else
1473 		gran = 1 << sdebug_physblk_exp;
1474 	put_unaligned_be16(gran, arr + 2);
1475 
1476 	/* Maximum Transfer Length */
1477 	if (sdebug_store_sectors > 0x400)
1478 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1479 
1480 	/* Optimal Transfer Length */
1481 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1482 
1483 	if (sdebug_lbpu) {
1484 		/* Maximum Unmap LBA Count */
1485 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1486 
1487 		/* Maximum Unmap Block Descriptor Count */
1488 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1489 	}
1490 
1491 	/* Unmap Granularity Alignment */
1492 	if (sdebug_unmap_alignment) {
1493 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1494 		arr[28] |= 0x80; /* UGAVALID */
1495 	}
1496 
1497 	/* Optimal Unmap Granularity */
1498 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1499 
1500 	/* Maximum WRITE SAME Length */
1501 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1502 
1503 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1504 }
1505 
1506 /* Block device characteristics VPD page (SBC-3) */
1507 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1508 {
1509 	memset(arr, 0, 0x3c);
1510 	arr[0] = 0;
1511 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1512 	arr[2] = 0;
1513 	arr[3] = 5;	/* less than 1.8" */
1514 	if (devip->zmodel == BLK_ZONED_HA)
1515 		arr[4] = 1 << 4;	/* zoned field = 01b */
1516 
1517 	return 0x3c;
1518 }
1519 
1520 /* Logical block provisioning VPD page (SBC-4) */
1521 static int inquiry_vpd_b2(unsigned char *arr)
1522 {
1523 	memset(arr, 0, 0x4);
1524 	arr[0] = 0;			/* threshold exponent */
1525 	if (sdebug_lbpu)
1526 		arr[1] = 1 << 7;
1527 	if (sdebug_lbpws)
1528 		arr[1] |= 1 << 6;
1529 	if (sdebug_lbpws10)
1530 		arr[1] |= 1 << 5;
1531 	if (sdebug_lbprz && scsi_debug_lbp())
1532 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1533 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1534 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1535 	/* threshold_percentage=0 */
1536 	return 0x4;
1537 }
1538 
1539 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1540 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1541 {
1542 	memset(arr, 0, 0x3c);
1543 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1544 	/*
1545 	 * Set Optimal number of open sequential write preferred zones and
1546 	 * Optimal number of non-sequentially written sequential write
1547 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1548 	 * fields set to zero, apart from Max. number of open swrz_s field.
1549 	 */
1550 	put_unaligned_be32(0xffffffff, &arr[4]);
1551 	put_unaligned_be32(0xffffffff, &arr[8]);
1552 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1553 		put_unaligned_be32(devip->max_open, &arr[12]);
1554 	else
1555 		put_unaligned_be32(0xffffffff, &arr[12]);
1556 	if (devip->zcap < devip->zsize) {
1557 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1558 		put_unaligned_be64(devip->zsize, &arr[20]);
1559 	} else {
1560 		arr[19] = 0;
1561 	}
1562 	return 0x3c;
1563 }
1564 
1565 #define SDEBUG_LONG_INQ_SZ 96
1566 #define SDEBUG_MAX_INQ_ARR_SZ 584
1567 
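/*
 * INQUIRY. When the EVPD bit is set, cmd[2] selects one of the VPD pages
 * built by the inquiry_vpd_*() helpers above; otherwise a 96 byte standard
 * INQUIRY response is returned.
 */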
1568 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1569 {
1570 	unsigned char pq_pdt;
1571 	unsigned char *arr;
1572 	unsigned char *cmd = scp->cmnd;
1573 	u32 alloc_len, n;
1574 	int ret;
1575 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1576 
1577 	alloc_len = get_unaligned_be16(cmd + 3);
1578 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1579 	if (!arr)
1580 		return DID_REQUEUE << 16;
1581 	is_disk = (sdebug_ptype == TYPE_DISK);
1582 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1583 	is_disk_zbc = (is_disk || is_zbc);
1584 	have_wlun = scsi_is_wlun(scp->device->lun);
1585 	if (have_wlun)
1586 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1587 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1588 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1589 	else
1590 		pq_pdt = (sdebug_ptype & 0x1f);
1591 	arr[0] = pq_pdt;
1592 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1593 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1594 		kfree(arr);
1595 		return check_condition_result;
1596 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1597 		int lu_id_num, port_group_id, target_dev_id;
1598 		u32 len;
1599 		char lu_id_str[6];
1600 		int host_no = devip->sdbg_host->shost->host_no;
1601 
1602 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1603 		    (devip->channel & 0x7f);
1604 		if (sdebug_vpd_use_hostno == 0)
1605 			host_no = 0;
1606 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1607 			    (devip->target * 1000) + devip->lun);
1608 		target_dev_id = ((host_no + 1) * 2000) +
1609 				 (devip->target * 1000) - 3;
1610 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1611 		if (0 == cmd[2]) { /* supported vital product data pages */
1612 			arr[1] = cmd[2];	/*sanity */
1613 			n = 4;
1614 			arr[n++] = 0x0;   /* this page */
1615 			arr[n++] = 0x80;  /* unit serial number */
1616 			arr[n++] = 0x83;  /* device identification */
1617 			arr[n++] = 0x84;  /* software interface ident. */
1618 			arr[n++] = 0x85;  /* management network addresses */
1619 			arr[n++] = 0x86;  /* extended inquiry */
1620 			arr[n++] = 0x87;  /* mode page policy */
1621 			arr[n++] = 0x88;  /* SCSI ports */
1622 			if (is_disk_zbc) {	  /* SBC or ZBC */
1623 				arr[n++] = 0x89;  /* ATA information */
1624 				arr[n++] = 0xb0;  /* Block limits */
1625 				arr[n++] = 0xb1;  /* Block characteristics */
1626 				if (is_disk)
1627 					arr[n++] = 0xb2;  /* LB Provisioning */
1628 				if (is_zbc)
1629 					arr[n++] = 0xb6;  /* ZB dev. char. */
1630 			}
1631 			arr[3] = n - 4;	  /* number of supported VPD pages */
1632 		} else if (0x80 == cmd[2]) { /* unit serial number */
1633 			arr[1] = cmd[2];	/*sanity */
1634 			arr[3] = len;
1635 			memcpy(&arr[4], lu_id_str, len);
1636 		} else if (0x83 == cmd[2]) { /* device identification */
1637 			arr[1] = cmd[2];	/*sanity */
1638 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1639 						target_dev_id, lu_id_num,
1640 						lu_id_str, len,
1641 						&devip->lu_name);
1642 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1643 			arr[1] = cmd[2];	/*sanity */
1644 			arr[3] = inquiry_vpd_84(&arr[4]);
1645 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1646 			arr[1] = cmd[2];	/*sanity */
1647 			arr[3] = inquiry_vpd_85(&arr[4]);
1648 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = 0x3c;	/* number of following entries */
1651 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1652 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1653 			else if (have_dif_prot)
1654 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1655 			else
1656 				arr[4] = 0x0;   /* no protection stuff */
1657 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1658 		} else if (0x87 == cmd[2]) { /* mode page policy */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = 0x8;	/* number of following entries */
1661 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1662 			arr[6] = 0x80;	/* mlus, shared */
1663 			arr[8] = 0x18;	 /* protocol specific lu */
1664 			arr[10] = 0x82;	 /* mlus, per initiator port */
1665 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1666 			arr[1] = cmd[2];	/*sanity */
1667 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1668 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1669 			arr[1] = cmd[2];        /*sanity */
1670 			n = inquiry_vpd_89(&arr[4]);
1671 			put_unaligned_be16(n, arr + 2);
1672 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1673 			arr[1] = cmd[2];        /*sanity */
1674 			arr[3] = inquiry_vpd_b0(&arr[4]);
1675 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1676 			arr[1] = cmd[2];        /*sanity */
1677 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1678 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1679 			arr[1] = cmd[2];        /*sanity */
1680 			arr[3] = inquiry_vpd_b2(&arr[4]);
1681 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1682 			arr[1] = cmd[2];        /*sanity */
1683 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1684 		} else {
1685 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1686 			kfree(arr);
1687 			return check_condition_result;
1688 		}
1689 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1690 		ret = fill_from_dev_buffer(scp, arr,
1691 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1692 		kfree(arr);
1693 		return ret;
1694 	}
1695 	/* drops through here for a standard inquiry */
1696 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1697 	arr[2] = sdebug_scsi_level;
1698 	arr[3] = 2;    /* response_data_format==2 */
1699 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1700 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1701 	if (sdebug_vpd_use_hostno == 0)
1702 		arr[5] |= 0x10; /* claim: implicit TPGS */
1703 	arr[6] = 0x10; /* claim: MultiP */
1704 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1705 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1706 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1707 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1708 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1709 	/* Use Vendor Specific area to place driver date in ASCII hex */
1710 	memcpy(&arr[36], sdebug_version_date, 8);
1711 	/* version descriptors (2 bytes each) follow */
1712 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1713 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1714 	n = 62;
1715 	if (is_disk) {		/* SBC-4 no version claimed */
1716 		put_unaligned_be16(0x600, arr + n);
1717 		n += 2;
1718 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1719 		put_unaligned_be16(0x525, arr + n);
1720 		n += 2;
1721 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1722 		put_unaligned_be16(0x624, arr + n);
1723 		n += 2;
1724 	}
1725 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1726 	ret = fill_from_dev_buffer(scp, arr,
1727 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1728 	kfree(arr);
1729 	return ret;
1730 }
1731 
1732 /* See resp_iec_m_pg() for how this data is manipulated */
1733 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1734 				   0, 0, 0x0, 0x0};
1735 
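/*
 * REQUEST SENSE. Returns "pollable" sense data while the device is stopped,
 * a THRESHOLD EXCEEDED response when the IEC mode page has TEST=1 and MRIE=6,
 * and otherwise "no sense"; the DESC bit in the CDB selects descriptor (0x72)
 * or fixed (0x70) sense format.
 */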
1736 static int resp_requests(struct scsi_cmnd *scp,
1737 			 struct sdebug_dev_info *devip)
1738 {
1739 	unsigned char *cmd = scp->cmnd;
1740 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1741 	bool dsense = !!(cmd[1] & 1);
1742 	u32 alloc_len = cmd[4];
1743 	u32 len = 18;
1744 	int stopped_state = atomic_read(&devip->stopped);
1745 
1746 	memset(arr, 0, sizeof(arr));
1747 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1748 		if (dsense) {
1749 			arr[0] = 0x72;
1750 			arr[1] = NOT_READY;
1751 			arr[2] = LOGICAL_UNIT_NOT_READY;
1752 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1753 			len = 8;
1754 		} else {
1755 			arr[0] = 0x70;
1756 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1757 			arr[7] = 0xa;			/* 18 byte sense buffer */
1758 			arr[12] = LOGICAL_UNIT_NOT_READY;
1759 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1760 		}
1761 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1762 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1763 		if (dsense) {
1764 			arr[0] = 0x72;
1765 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1766 			arr[2] = THRESHOLD_EXCEEDED;
1767 			arr[3] = 0xff;		/* Failure prediction(false) */
1768 			len = 8;
1769 		} else {
1770 			arr[0] = 0x70;
1771 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1772 			arr[7] = 0xa;		/* 18 byte sense buffer */
1773 			arr[12] = THRESHOLD_EXCEEDED;
1774 			arr[13] = 0xff;		/* Failure prediction(false) */
1775 		}
1776 	} else {	/* nothing to report */
1777 		if (dsense) {
1778 			len = 8;
1779 			memset(arr, 0, len);
1780 			arr[0] = 0x72;
1781 		} else {
1782 			memset(arr, 0, len);
1783 			arr[0] = 0x70;
1784 			arr[7] = 0xa;
1785 		}
1786 	}
1787 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1788 }
1789 
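/*
 * START STOP UNIT. A stopped state of 2 means the device is still within its
 * tur_ms_to_ready window after creation; a START in that window is rejected
 * so the delay cannot be overridden, while a STOP is dummied up as success.
 */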
1790 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1791 {
1792 	unsigned char *cmd = scp->cmnd;
1793 	int power_cond, want_stop, stopped_state;
1794 	bool changing;
1795 
1796 	power_cond = (cmd[4] & 0xf0) >> 4;
1797 	if (power_cond) {
1798 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1799 		return check_condition_result;
1800 	}
1801 	want_stop = !(cmd[4] & 1);
1802 	stopped_state = atomic_read(&devip->stopped);
1803 	if (stopped_state == 2) {
1804 		ktime_t now_ts = ktime_get_boottime();
1805 
1806 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1807 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1808 
1809 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1810 				/* tur_ms_to_ready timer expired */
1811 				atomic_set(&devip->stopped, 0);
1812 				stopped_state = 0;
1813 			}
1814 		}
1815 		if (stopped_state == 2) {
1816 			if (want_stop) {
1817 				stopped_state = 1;	/* dummy up success */
1818 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1819 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1820 				return check_condition_result;
1821 			}
1822 		}
1823 	}
1824 	changing = (stopped_state != want_stop);
1825 	if (changing)
1826 		atomic_xchg(&devip->stopped, want_stop);
1827 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1828 		return SDEG_RES_IMMED_MASK;
1829 	else
1830 		return 0;
1831 }
1832 
1833 static sector_t get_sdebug_capacity(void)
1834 {
1835 	static const unsigned int gibibyte = 1073741824;
1836 
1837 	if (sdebug_virtual_gb > 0)
1838 		return (sector_t)sdebug_virtual_gb *
1839 			(gibibyte / sdebug_sector_size);
1840 	else
1841 		return sdebug_store_sectors;
1842 }
1843 
1844 #define SDEBUG_READCAP_ARR_SZ 8
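/* READ CAPACITY(10); a returned LBA of 0xffffffff tells the initiator to
 * issue READ CAPACITY(16) instead */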
1845 static int resp_readcap(struct scsi_cmnd *scp,
1846 			struct sdebug_dev_info *devip)
1847 {
1848 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1849 	unsigned int capac;
1850 
1851 	/* following just in case virtual_gb changed */
1852 	sdebug_capacity = get_sdebug_capacity();
1853 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1854 	if (sdebug_capacity < 0xffffffff) {
1855 		capac = (unsigned int)sdebug_capacity - 1;
1856 		put_unaligned_be32(capac, arr + 0);
1857 	} else
1858 		put_unaligned_be32(0xffffffff, arr + 0);
1859 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1860 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1861 }
1862 
1863 #define SDEBUG_READCAP16_ARR_SZ 32
1864 static int resp_readcap16(struct scsi_cmnd *scp,
1865 			  struct sdebug_dev_info *devip)
1866 {
1867 	unsigned char *cmd = scp->cmnd;
1868 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1869 	u32 alloc_len;
1870 
1871 	alloc_len = get_unaligned_be32(cmd + 10);
1872 	/* following just in case virtual_gb changed */
1873 	sdebug_capacity = get_sdebug_capacity();
1874 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1875 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1876 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1877 	arr[13] = sdebug_physblk_exp & 0xf;
1878 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1879 
1880 	if (scsi_debug_lbp()) {
1881 		arr[14] |= 0x80; /* LBPME */
1882 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1883 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1884 		 * in the wider field maps to 0 in this field.
1885 		 */
1886 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1887 			arr[14] |= 0x40;
1888 	}
1889 
1890 	/*
1891 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1892 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1893 	 */
1894 	if (devip->zmodel == BLK_ZONED_HM)
1895 		arr[12] |= 1 << 4;
1896 
1897 	arr[15] = sdebug_lowest_aligned & 0xff;
1898 
1899 	if (have_dif_prot) {
1900 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1901 		arr[12] |= 1; /* PROT_EN */
1902 	}
1903 
1904 	return fill_from_dev_buffer(scp, arr,
1905 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1906 }
1907 
1908 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1909 
1910 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1911 			      struct sdebug_dev_info *devip)
1912 {
1913 	unsigned char *cmd = scp->cmnd;
1914 	unsigned char *arr;
1915 	int host_no = devip->sdbg_host->shost->host_no;
1916 	int port_group_a, port_group_b, port_a, port_b;
1917 	u32 alen, n, rlen;
1918 	int ret;
1919 
1920 	alen = get_unaligned_be32(cmd + 6);
1921 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1922 	if (!arr)
1923 		return DID_REQUEUE << 16;
1924 	/*
1925 	 * EVPD page 0x88 states we have two ports, one
1926 	 * real and a fake port with no device connected.
1927 	 * So we create two port groups with one port each
1928 	 * and set the group with port B to unavailable.
1929 	 */
1930 	port_a = 0x1; /* relative port A */
1931 	port_b = 0x2; /* relative port B */
1932 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1933 			(devip->channel & 0x7f);
1934 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1935 			(devip->channel & 0x7f) + 0x80;
1936 
1937 	/*
1938 	 * The asymmetric access state is cycled according to the host_id.
1939 	 */
1940 	n = 4;
1941 	if (sdebug_vpd_use_hostno == 0) {
1942 		arr[n++] = host_no % 3; /* Asymm access state */
1943 		arr[n++] = 0x0F; /* claim: all states are supported */
1944 	} else {
1945 		arr[n++] = 0x0; /* Active/Optimized path */
1946 		arr[n++] = 0x01; /* only support active/optimized paths */
1947 	}
1948 	put_unaligned_be16(port_group_a, arr + n);
1949 	n += 2;
1950 	arr[n++] = 0;    /* Reserved */
1951 	arr[n++] = 0;    /* Status code */
1952 	arr[n++] = 0;    /* Vendor unique */
1953 	arr[n++] = 0x1;  /* One port per group */
1954 	arr[n++] = 0;    /* Reserved */
1955 	arr[n++] = 0;    /* Reserved */
1956 	put_unaligned_be16(port_a, arr + n);
1957 	n += 2;
1958 	arr[n++] = 3;    /* Port unavailable */
1959 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1960 	put_unaligned_be16(port_group_b, arr + n);
1961 	n += 2;
1962 	arr[n++] = 0;    /* Reserved */
1963 	arr[n++] = 0;    /* Status code */
1964 	arr[n++] = 0;    /* Vendor unique */
1965 	arr[n++] = 0x1;  /* One port per group */
1966 	arr[n++] = 0;    /* Reserved */
1967 	arr[n++] = 0;    /* Reserved */
1968 	put_unaligned_be16(port_b, arr + n);
1969 	n += 2;
1970 
1971 	rlen = n - 4;
1972 	put_unaligned_be32(rlen, arr + 0);
1973 
1974 	/*
1975 	 * Return the smallest value of either
1976 	 * - The allocated length
1977 	 * - The constructed command length
1978 	 * - The maximum array size
1979 	 */
1980 	rlen = min(alen, n);
1981 	ret = fill_from_dev_buffer(scp, arr,
1982 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1983 	kfree(arr);
1984 	return ret;
1985 }
1986 
1987 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1988 			     struct sdebug_dev_info *devip)
1989 {
1990 	bool rctd;
1991 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1992 	u16 req_sa, u;
1993 	u32 alloc_len, a_len;
1994 	int k, offset, len, errsts, count, bump, na;
1995 	const struct opcode_info_t *oip;
1996 	const struct opcode_info_t *r_oip;
1997 	u8 *arr;
1998 	u8 *cmd = scp->cmnd;
1999 
2000 	rctd = !!(cmd[2] & 0x80);
2001 	reporting_opts = cmd[2] & 0x7;
2002 	req_opcode = cmd[3];
2003 	req_sa = get_unaligned_be16(cmd + 4);
2004 	alloc_len = get_unaligned_be32(cmd + 6);
2005 	if (alloc_len < 4 || alloc_len > 0xffff) {
2006 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2007 		return check_condition_result;
2008 	}
2009 	if (alloc_len > 8192)
2010 		a_len = 8192;
2011 	else
2012 		a_len = alloc_len;
2013 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2014 	if (NULL == arr) {
2015 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2016 				INSUFF_RES_ASCQ);
2017 		return check_condition_result;
2018 	}
2019 	switch (reporting_opts) {
2020 	case 0:	/* all commands */
2021 		/* count number of commands */
2022 		for (count = 0, oip = opcode_info_arr;
2023 		     oip->num_attached != 0xff; ++oip) {
2024 			if (F_INV_OP & oip->flags)
2025 				continue;
2026 			count += (oip->num_attached + 1);
2027 		}
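		/* each command descriptor is 8 bytes, plus a 12 byte command
		 * timeouts descriptor when RCTD is set */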
2028 		bump = rctd ? 20 : 8;
2029 		put_unaligned_be32(count * bump, arr);
2030 		for (offset = 4, oip = opcode_info_arr;
2031 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2032 			if (F_INV_OP & oip->flags)
2033 				continue;
2034 			na = oip->num_attached;
2035 			arr[offset] = oip->opcode;
2036 			put_unaligned_be16(oip->sa, arr + offset + 2);
2037 			if (rctd)
2038 				arr[offset + 5] |= 0x2;
2039 			if (FF_SA & oip->flags)
2040 				arr[offset + 5] |= 0x1;
2041 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2042 			if (rctd)
2043 				put_unaligned_be16(0xa, arr + offset + 8);
2044 			r_oip = oip;
2045 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2046 				if (F_INV_OP & oip->flags)
2047 					continue;
2048 				offset += bump;
2049 				arr[offset] = oip->opcode;
2050 				put_unaligned_be16(oip->sa, arr + offset + 2);
2051 				if (rctd)
2052 					arr[offset + 5] |= 0x2;
2053 				if (FF_SA & oip->flags)
2054 					arr[offset + 5] |= 0x1;
2055 				put_unaligned_be16(oip->len_mask[0],
2056 						   arr + offset + 6);
2057 				if (rctd)
2058 					put_unaligned_be16(0xa,
2059 							   arr + offset + 8);
2060 			}
2061 			oip = r_oip;
2062 			offset += bump;
2063 		}
2064 		break;
2065 	case 1:	/* one command: opcode only */
2066 	case 2:	/* one command: opcode plus service action */
2067 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2068 		sdeb_i = opcode_ind_arr[req_opcode];
2069 		oip = &opcode_info_arr[sdeb_i];
2070 		if (F_INV_OP & oip->flags) {
2071 			supp = 1;
2072 			offset = 4;
2073 		} else {
2074 			if (1 == reporting_opts) {
2075 				if (FF_SA & oip->flags) {
2076 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2077 							     2, 2);
2078 					kfree(arr);
2079 					return check_condition_result;
2080 				}
2081 				req_sa = 0;
2082 			} else if (2 == reporting_opts &&
2083 				   0 == (FF_SA & oip->flags)) {
2084 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2085 				kfree(arr);
2086 				return check_condition_result;
2087 			}
2088 			if (0 == (FF_SA & oip->flags) &&
2089 			    req_opcode == oip->opcode)
2090 				supp = 3;
2091 			else if (0 == (FF_SA & oip->flags)) {
2092 				na = oip->num_attached;
2093 				for (k = 0, oip = oip->arrp; k < na;
2094 				     ++k, ++oip) {
2095 					if (req_opcode == oip->opcode)
2096 						break;
2097 				}
2098 				supp = (k >= na) ? 1 : 3;
2099 			} else if (req_sa != oip->sa) {
2100 				na = oip->num_attached;
2101 				for (k = 0, oip = oip->arrp; k < na;
2102 				     ++k, ++oip) {
2103 					if (req_sa == oip->sa)
2104 						break;
2105 				}
2106 				supp = (k >= na) ? 1 : 3;
2107 			} else
2108 				supp = 3;
2109 			if (3 == supp) {
2110 				u = oip->len_mask[0];
2111 				put_unaligned_be16(u, arr + 2);
2112 				arr[4] = oip->opcode;
2113 				for (k = 1; k < u; ++k)
2114 					arr[4 + k] = (k < 16) ?
2115 						 oip->len_mask[k] : 0xff;
2116 				offset = 4 + u;
2117 			} else
2118 				offset = 4;
2119 		}
2120 		arr[1] = (rctd ? 0x80 : 0) | supp;
2121 		if (rctd) {
2122 			put_unaligned_be16(0xa, arr + offset);
2123 			offset += 12;
2124 		}
2125 		break;
2126 	default:
2127 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2128 		kfree(arr);
2129 		return check_condition_result;
2130 	}
2131 	offset = (offset < a_len) ? offset : a_len;
2132 	len = (offset < alloc_len) ? offset : alloc_len;
2133 	errsts = fill_from_dev_buffer(scp, arr, len);
2134 	kfree(arr);
2135 	return errsts;
2136 }
2137 
2138 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2139 			  struct sdebug_dev_info *devip)
2140 {
2141 	bool repd;
2142 	u32 alloc_len, len;
2143 	u8 arr[16];
2144 	u8 *cmd = scp->cmnd;
2145 
2146 	memset(arr, 0, sizeof(arr));
2147 	repd = !!(cmd[2] & 0x80);
2148 	alloc_len = get_unaligned_be32(cmd + 6);
2149 	if (alloc_len < 4) {
2150 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2151 		return check_condition_result;
2152 	}
2153 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2154 	arr[1] = 0x1;		/* ITNRS */
2155 	if (repd) {
2156 		arr[3] = 0xc;
2157 		len = 16;
2158 	} else
2159 		len = 4;
2160 
2161 	len = (len < alloc_len) ? len : alloc_len;
2162 	return fill_from_dev_buffer(scp, arr, len);
2163 }
2164 
2165 /* <<Following mode page info copied from ST318451LW>> */
2166 
2167 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2168 {	/* Read-Write Error Recovery page for mode_sense */
2169 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2170 					5, 0, 0xff, 0xff};
2171 
2172 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2173 	if (1 == pcontrol)
2174 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2175 	return sizeof(err_recov_pg);
2176 }
2177 
2178 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2179 { 	/* Disconnect-Reconnect page for mode_sense */
2180 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2181 					 0, 0, 0, 0, 0, 0, 0, 0};
2182 
2183 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2184 	if (1 == pcontrol)
2185 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2186 	return sizeof(disconnect_pg);
2187 }
2188 
2189 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2190 {       /* Format device page for mode_sense */
2191 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2192 				     0, 0, 0, 0, 0, 0, 0, 0,
2193 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2194 
2195 	memcpy(p, format_pg, sizeof(format_pg));
2196 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2197 	put_unaligned_be16(sdebug_sector_size, p + 12);
2198 	if (sdebug_removable)
2199 		p[20] |= 0x20; /* should agree with INQUIRY */
2200 	if (1 == pcontrol)
2201 		memset(p + 2, 0, sizeof(format_pg) - 2);
2202 	return sizeof(format_pg);
2203 }
2204 
2205 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2206 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2207 				     0, 0, 0, 0};
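/* caching_pg has file scope so that changes made via MODE SELECT (see
 * resp_mode_select()) persist and are reported by later MODE SENSE commands */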
2208 
2209 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2210 { 	/* Caching page for mode_sense */
2211 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2212 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2213 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2214 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2215 
2216 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2217 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2218 	memcpy(p, caching_pg, sizeof(caching_pg));
2219 	if (1 == pcontrol)
2220 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2221 	else if (2 == pcontrol)
2222 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2223 	return sizeof(caching_pg);
2224 }
2225 
2226 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2227 				    0, 0, 0x2, 0x4b};
2228 
2229 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2230 { 	/* Control mode page for mode_sense */
2231 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2232 					0, 0, 0, 0};
2233 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2234 				     0, 0, 0x2, 0x4b};
2235 
2236 	if (sdebug_dsense)
2237 		ctrl_m_pg[2] |= 0x4;
2238 	else
2239 		ctrl_m_pg[2] &= ~0x4;
2240 
2241 	if (sdebug_ato)
2242 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2243 
2244 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2245 	if (1 == pcontrol)
2246 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2247 	else if (2 == pcontrol)
2248 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2249 	return sizeof(ctrl_m_pg);
2250 }
2251 
2252 
2253 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2254 {	/* Informational Exceptions control mode page for mode_sense */
2255 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2256 				       0, 0, 0x0, 0x0};
2257 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2258 				      0, 0, 0x0, 0x0};
2259 
2260 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2261 	if (1 == pcontrol)
2262 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2263 	else if (2 == pcontrol)
2264 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2265 	return sizeof(iec_m_pg);
2266 }
2267 
2268 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2269 {	/* SAS SSP mode page - short format for mode_sense */
2270 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2271 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2272 
2273 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2274 	if (1 == pcontrol)
2275 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2276 	return sizeof(sas_sf_m_pg);
2277 }
2278 
2279 
2280 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2281 			      int target_dev_id)
2282 {	/* SAS phy control and discover mode page for mode_sense */
2283 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2284 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2285 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2286 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2287 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2288 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2289 		    0, 0, 0, 0, 0, 0, 0, 0,
2290 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2291 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2292 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2293 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2294 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2295 		    0, 0, 0, 0, 0, 0, 0, 0,
2296 		};
2297 	int port_a, port_b;
2298 
2299 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2300 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2301 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2302 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2303 	port_a = target_dev_id + 1;
2304 	port_b = port_a + 1;
2305 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2306 	put_unaligned_be32(port_a, p + 20);
2307 	put_unaligned_be32(port_b, p + 48 + 20);
2308 	if (1 == pcontrol)
2309 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2310 	return sizeof(sas_pcd_m_pg);
2311 }
2312 
2313 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2314 {	/* SAS SSP shared protocol specific port mode subpage */
2315 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2316 		    0, 0, 0, 0, 0, 0, 0, 0,
2317 		};
2318 
2319 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2320 	if (1 == pcontrol)
2321 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2322 	return sizeof(sas_sha_m_pg);
2323 }
2324 
2325 #define SDEBUG_MAX_MSENSE_SZ 256
2326 
2327 static int resp_mode_sense(struct scsi_cmnd *scp,
2328 			   struct sdebug_dev_info *devip)
2329 {
2330 	int pcontrol, pcode, subpcode, bd_len;
2331 	unsigned char dev_spec;
2332 	u32 alloc_len, offset, len;
2333 	int target_dev_id;
2334 	int target = scp->device->id;
2335 	unsigned char *ap;
2336 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2337 	unsigned char *cmd = scp->cmnd;
2338 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2339 
2340 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2341 	pcontrol = (cmd[2] & 0xc0) >> 6;
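	/* PC field: 0 -> current, 1 -> changeable, 2 -> default, 3 -> saved */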
2342 	pcode = cmd[2] & 0x3f;
2343 	subpcode = cmd[3];
2344 	msense_6 = (MODE_SENSE == cmd[0]);
2345 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2346 	is_disk = (sdebug_ptype == TYPE_DISK);
2347 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2348 	if ((is_disk || is_zbc) && !dbd)
2349 		bd_len = llbaa ? 16 : 8;
2350 	else
2351 		bd_len = 0;
2352 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2353 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2354 	if (0x3 == pcontrol) {  /* Saving values not supported */
2355 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2356 		return check_condition_result;
2357 	}
2358 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2359 			(devip->target * 1000) - 3;
2360 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2361 	if (is_disk || is_zbc) {
2362 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2363 		if (sdebug_wp)
2364 			dev_spec |= 0x80;
2365 	} else
2366 		dev_spec = 0x0;
2367 	if (msense_6) {
2368 		arr[2] = dev_spec;
2369 		arr[3] = bd_len;
2370 		offset = 4;
2371 	} else {
2372 		arr[3] = dev_spec;
2373 		if (16 == bd_len)
2374 			arr[4] = 0x1;	/* set LONGLBA bit */
2375 		arr[7] = bd_len;	/* assume 255 or less */
2376 		offset = 8;
2377 	}
2378 	ap = arr + offset;
2379 	if ((bd_len > 0) && (!sdebug_capacity))
2380 		sdebug_capacity = get_sdebug_capacity();
2381 
2382 	if (8 == bd_len) {
2383 		if (sdebug_capacity > 0xfffffffe)
2384 			put_unaligned_be32(0xffffffff, ap + 0);
2385 		else
2386 			put_unaligned_be32(sdebug_capacity, ap + 0);
2387 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2388 		offset += bd_len;
2389 		ap = arr + offset;
2390 	} else if (16 == bd_len) {
2391 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2392 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2393 		offset += bd_len;
2394 		ap = arr + offset;
2395 	}
2396 
2397 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2398 		/* TODO: Control Extension page */
2399 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2400 		return check_condition_result;
2401 	}
2402 	bad_pcode = false;
2403 
2404 	switch (pcode) {
2405 	case 0x1:	/* Read-Write error recovery page, direct access */
2406 		len = resp_err_recov_pg(ap, pcontrol, target);
2407 		offset += len;
2408 		break;
2409 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2410 		len = resp_disconnect_pg(ap, pcontrol, target);
2411 		offset += len;
2412 		break;
2413 	case 0x3:       /* Format device page, direct access */
2414 		if (is_disk) {
2415 			len = resp_format_pg(ap, pcontrol, target);
2416 			offset += len;
2417 		} else
2418 			bad_pcode = true;
2419 		break;
2420 	case 0x8:	/* Caching page, direct access */
2421 		if (is_disk || is_zbc) {
2422 			len = resp_caching_pg(ap, pcontrol, target);
2423 			offset += len;
2424 		} else
2425 			bad_pcode = true;
2426 		break;
2427 	case 0xa:	/* Control Mode page, all devices */
2428 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2429 		offset += len;
2430 		break;
2431 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2432 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2433 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2434 			return check_condition_result;
2435 		}
2436 		len = 0;
2437 		if ((0x0 == subpcode) || (0xff == subpcode))
2438 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2439 		if ((0x1 == subpcode) || (0xff == subpcode))
2440 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2441 						  target_dev_id);
2442 		if ((0x2 == subpcode) || (0xff == subpcode))
2443 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2444 		offset += len;
2445 		break;
2446 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2447 		len = resp_iec_m_pg(ap, pcontrol, target);
2448 		offset += len;
2449 		break;
2450 	case 0x3f:	/* Read all Mode pages */
2451 		if ((0 == subpcode) || (0xff == subpcode)) {
2452 			len = resp_err_recov_pg(ap, pcontrol, target);
2453 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2454 			if (is_disk) {
2455 				len += resp_format_pg(ap + len, pcontrol,
2456 						      target);
2457 				len += resp_caching_pg(ap + len, pcontrol,
2458 						       target);
2459 			} else if (is_zbc) {
2460 				len += resp_caching_pg(ap + len, pcontrol,
2461 						       target);
2462 			}
2463 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2464 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2465 			if (0xff == subpcode) {
2466 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2467 						  target, target_dev_id);
2468 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2469 			}
2470 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2471 			offset += len;
2472 		} else {
2473 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2474 			return check_condition_result;
2475 		}
2476 		break;
2477 	default:
2478 		bad_pcode = true;
2479 		break;
2480 	}
2481 	if (bad_pcode) {
2482 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2483 		return check_condition_result;
2484 	}
2485 	if (msense_6)
2486 		arr[0] = offset - 1;
2487 	else
2488 		put_unaligned_be16((offset - 2), arr + 0);
2489 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2490 }
2491 
2492 #define SDEBUG_MAX_MSELECT_SZ 512
2493 
2494 static int resp_mode_select(struct scsi_cmnd *scp,
2495 			    struct sdebug_dev_info *devip)
2496 {
2497 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2498 	int param_len, res, mpage;
2499 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2500 	unsigned char *cmd = scp->cmnd;
2501 	int mselect6 = (MODE_SELECT == cmd[0]);
2502 
2503 	memset(arr, 0, sizeof(arr));
2504 	pf = cmd[1] & 0x10;
2505 	sp = cmd[1] & 0x1;
2506 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2507 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2508 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2509 		return check_condition_result;
2510 	}
2511 	res = fetch_to_dev_buffer(scp, arr, param_len);
2512 	if (-1 == res)
2513 		return DID_ERROR << 16;
2514 	else if (sdebug_verbose && (res < param_len))
2515 		sdev_printk(KERN_INFO, scp->device,
2516 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2517 			    __func__, param_len, res);
2518 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2519 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2520 	off = bd_len + (mselect6 ? 4 : 8);
2521 	if (md_len > 2 || off >= res) {
2522 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2523 		return check_condition_result;
2524 	}
2525 	mpage = arr[off] & 0x3f;
2526 	ps = !!(arr[off] & 0x80);
2527 	if (ps) {
2528 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2529 		return check_condition_result;
2530 	}
2531 	spf = !!(arr[off] & 0x40);
2532 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2533 		       (arr[off + 1] + 2);
2534 	if ((pg_len + off) > param_len) {
2535 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2536 				PARAMETER_LIST_LENGTH_ERR, 0);
2537 		return check_condition_result;
2538 	}
2539 	switch (mpage) {
2540 	case 0x8:      /* Caching Mode page */
2541 		if (caching_pg[1] == arr[off + 1]) {
2542 			memcpy(caching_pg + 2, arr + off + 2,
2543 			       sizeof(caching_pg) - 2);
2544 			goto set_mode_changed_ua;
2545 		}
2546 		break;
2547 	case 0xa:      /* Control Mode page */
2548 		if (ctrl_m_pg[1] == arr[off + 1]) {
2549 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2550 			       sizeof(ctrl_m_pg) - 2);
2551 			if (ctrl_m_pg[4] & 0x8)
2552 				sdebug_wp = true;
2553 			else
2554 				sdebug_wp = false;
2555 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2556 			goto set_mode_changed_ua;
2557 		}
2558 		break;
2559 	case 0x1c:      /* Informational Exceptions Mode page */
2560 		if (iec_m_pg[1] == arr[off + 1]) {
2561 			memcpy(iec_m_pg + 2, arr + off + 2,
2562 			       sizeof(iec_m_pg) - 2);
2563 			goto set_mode_changed_ua;
2564 		}
2565 		break;
2566 	default:
2567 		break;
2568 	}
2569 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2570 	return check_condition_result;
2571 set_mode_changed_ua:
2572 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2573 	return 0;
2574 }
2575 
2576 static int resp_temp_l_pg(unsigned char *arr)
2577 {
2578 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2579 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2580 		};
2581 
2582 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2583 	return sizeof(temp_l_pg);
2584 }
2585 
2586 static int resp_ie_l_pg(unsigned char *arr)
2587 {
2588 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2589 		};
2590 
2591 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2592 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2593 		arr[4] = THRESHOLD_EXCEEDED;
2594 		arr[5] = 0xff;
2595 	}
2596 	return sizeof(ie_l_pg);
2597 }
2598 
2599 static int resp_env_rep_l_spg(unsigned char *arr)
2600 {
2601 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2602 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2603 					 0x1, 0x0, 0x23, 0x8,
2604 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2605 		};
2606 
2607 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2608 	return sizeof(env_rep_l_spg);
2609 }
2610 
2611 #define SDEBUG_MAX_LSENSE_SZ 512
2612 
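/*
 * LOG SENSE. Supports the supported pages (0x0), temperature (0xd),
 * environment reporting (0xd/0x1) and informational exceptions (0x2f)
 * pages, plus the corresponding supported subpages listings.
 */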
2613 static int resp_log_sense(struct scsi_cmnd *scp,
2614 			  struct sdebug_dev_info *devip)
2615 {
2616 	int ppc, sp, pcode, subpcode;
2617 	u32 alloc_len, len, n;
2618 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2619 	unsigned char *cmd = scp->cmnd;
2620 
2621 	memset(arr, 0, sizeof(arr));
2622 	ppc = cmd[1] & 0x2;
2623 	sp = cmd[1] & 0x1;
2624 	if (ppc || sp) {
2625 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2626 		return check_condition_result;
2627 	}
2628 	pcode = cmd[2] & 0x3f;
2629 	subpcode = cmd[3] & 0xff;
2630 	alloc_len = get_unaligned_be16(cmd + 7);
2631 	arr[0] = pcode;
2632 	if (0 == subpcode) {
2633 		switch (pcode) {
2634 		case 0x0:	/* Supported log pages log page */
2635 			n = 4;
2636 			arr[n++] = 0x0;		/* this page */
2637 			arr[n++] = 0xd;		/* Temperature */
2638 			arr[n++] = 0x2f;	/* Informational exceptions */
2639 			arr[3] = n - 4;
2640 			break;
2641 		case 0xd:	/* Temperature log page */
2642 			arr[3] = resp_temp_l_pg(arr + 4);
2643 			break;
2644 		case 0x2f:	/* Informational exceptions log page */
2645 			arr[3] = resp_ie_l_pg(arr + 4);
2646 			break;
2647 		default:
2648 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2649 			return check_condition_result;
2650 		}
2651 	} else if (0xff == subpcode) {
2652 		arr[0] |= 0x40;
2653 		arr[1] = subpcode;
2654 		switch (pcode) {
2655 		case 0x0:	/* Supported log pages and subpages log page */
2656 			n = 4;
2657 			arr[n++] = 0x0;
2658 			arr[n++] = 0x0;		/* 0,0 page */
2659 			arr[n++] = 0x0;
2660 			arr[n++] = 0xff;	/* this page */
2661 			arr[n++] = 0xd;
2662 			arr[n++] = 0x0;		/* Temperature */
2663 			arr[n++] = 0xd;
2664 			arr[n++] = 0x1;		/* Environment reporting */
2665 			arr[n++] = 0xd;
2666 			arr[n++] = 0xff;	/* all 0xd subpages */
2667 			arr[n++] = 0x2f;
2668 			arr[n++] = 0x0;	/* Informational exceptions */
2669 			arr[n++] = 0x2f;
2670 			arr[n++] = 0xff;	/* all 0x2f subpages */
2671 			arr[3] = n - 4;
2672 			break;
2673 		case 0xd:	/* Temperature subpages */
2674 			n = 4;
2675 			arr[n++] = 0xd;
2676 			arr[n++] = 0x0;		/* Temperature */
2677 			arr[n++] = 0xd;
2678 			arr[n++] = 0x1;		/* Environment reporting */
2679 			arr[n++] = 0xd;
2680 			arr[n++] = 0xff;	/* these subpages */
2681 			arr[3] = n - 4;
2682 			break;
2683 		case 0x2f:	/* Informational exceptions subpages */
2684 			n = 4;
2685 			arr[n++] = 0x2f;
2686 			arr[n++] = 0x0;		/* Informational exceptions */
2687 			arr[n++] = 0x2f;
2688 			arr[n++] = 0xff;	/* these subpages */
2689 			arr[3] = n - 4;
2690 			break;
2691 		default:
2692 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2693 			return check_condition_result;
2694 		}
2695 	} else if (subpcode > 0) {
2696 		arr[0] |= 0x40;
2697 		arr[1] = subpcode;
2698 		if (pcode == 0xd && subpcode == 1)
2699 			arr[3] = resp_env_rep_l_spg(arr + 4);
2700 		else {
2701 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2702 			return check_condition_result;
2703 		}
2704 	} else {
2705 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2706 		return check_condition_result;
2707 	}
2708 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2709 	return fill_from_dev_buffer(scp, arr,
2710 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2711 }
2712 
2713 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2714 {
2715 	return devip->nr_zones != 0;
2716 }
2717 
2718 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2719 					unsigned long long lba)
2720 {
2721 	u32 zno = div_u64(lba, devip->zsize);
2722 	struct sdeb_zone_state *zsp;
2723 
2724 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2725 		return &devip->zstate[zno];
2726 
2727 	/*
2728 	 * If the zone capacity is less than the zone size, adjust for gap
2729 	 * zones.
2730 	 */
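	/*
	 * zstate[] holds nr_conv_zones conventional zones followed by
	 * alternating sequential and gap zones, hence the index below:
	 *   2 * zno - nr_conv_zones = nr_conv_zones + 2 * (zno - nr_conv_zones)
	 * e.g. with nr_conv_zones == 1, logical zone 3 maps to zstate[5].
	 */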
2731 	zno = 2 * zno - devip->nr_conv_zones;
2732 	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
2733 	zsp = &devip->zstate[zno];
2734 	if (lba >= zsp->z_start + zsp->z_size)
2735 		zsp++;
2736 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2737 	return zsp;
2738 }
2739 
2740 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2741 {
2742 	return zsp->z_type == ZBC_ZTYPE_CNV;
2743 }
2744 
2745 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2746 {
2747 	return zsp->z_type == ZBC_ZTYPE_GAP;
2748 }
2749 
2750 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2751 {
2752 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2753 }
2754 
2755 static void zbc_close_zone(struct sdebug_dev_info *devip,
2756 			   struct sdeb_zone_state *zsp)
2757 {
2758 	enum sdebug_z_cond zc;
2759 
2760 	if (!zbc_zone_is_seq(zsp))
2761 		return;
2762 
2763 	zc = zsp->z_cond;
2764 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2765 		return;
2766 
2767 	if (zc == ZC2_IMPLICIT_OPEN)
2768 		devip->nr_imp_open--;
2769 	else
2770 		devip->nr_exp_open--;
2771 
2772 	if (zsp->z_wp == zsp->z_start) {
2773 		zsp->z_cond = ZC1_EMPTY;
2774 	} else {
2775 		zsp->z_cond = ZC4_CLOSED;
2776 		devip->nr_closed++;
2777 	}
2778 }
2779 
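/* Close the first implicitly open zone found (scanning in zone index order)
 * to make room for opening another zone. */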
2780 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2781 {
2782 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2783 	unsigned int i;
2784 
2785 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2786 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2787 			zbc_close_zone(devip, zsp);
2788 			return;
2789 		}
2790 	}
2791 }
2792 
2793 static void zbc_open_zone(struct sdebug_dev_info *devip,
2794 			  struct sdeb_zone_state *zsp, bool explicit)
2795 {
2796 	enum sdebug_z_cond zc;
2797 
2798 	if (!zbc_zone_is_seq(zsp))
2799 		return;
2800 
2801 	zc = zsp->z_cond;
2802 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2803 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2804 		return;
2805 
2806 	/* Close an implicit open zone if necessary */
2807 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2808 		zbc_close_zone(devip, zsp);
2809 	else if (devip->max_open &&
2810 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2811 		zbc_close_imp_open_zone(devip);
2812 
2813 	if (zsp->z_cond == ZC4_CLOSED)
2814 		devip->nr_closed--;
2815 	if (explicit) {
2816 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2817 		devip->nr_exp_open++;
2818 	} else {
2819 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2820 		devip->nr_imp_open++;
2821 	}
2822 }
2823 
2824 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2825 				     struct sdeb_zone_state *zsp)
2826 {
2827 	switch (zsp->z_cond) {
2828 	case ZC2_IMPLICIT_OPEN:
2829 		devip->nr_imp_open--;
2830 		break;
2831 	case ZC3_EXPLICIT_OPEN:
2832 		devip->nr_exp_open--;
2833 		break;
2834 	default:
2835 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2836 			  zsp->z_start, zsp->z_cond);
2837 		break;
2838 	}
2839 	zsp->z_cond = ZC5_FULL;
2840 }
2841 
2842 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2843 		       unsigned long long lba, unsigned int num)
2844 {
2845 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2846 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2847 
2848 	if (!zbc_zone_is_seq(zsp))
2849 		return;
2850 
2851 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2852 		zsp->z_wp += num;
2853 		if (zsp->z_wp >= zend)
2854 			zbc_set_zone_full(devip, zsp);
2855 		return;
2856 	}
2857 
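	/*
	 * Sequential write preferred zone: writes may land anywhere and may
	 * span zones; the write pointer only advances when a write extends
	 * past it.
	 */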
2858 	while (num) {
2859 		if (lba != zsp->z_wp)
2860 			zsp->z_non_seq_resource = true;
2861 
2862 		end = lba + num;
2863 		if (end >= zend) {
2864 			n = zend - lba;
2865 			zsp->z_wp = zend;
2866 		} else if (end > zsp->z_wp) {
2867 			n = num;
2868 			zsp->z_wp = end;
2869 		} else {
2870 			n = num;
2871 		}
2872 		if (zsp->z_wp >= zend)
2873 			zbc_set_zone_full(devip, zsp);
2874 
2875 		num -= n;
2876 		lba += n;
2877 		if (num) {
2878 			zsp++;
2879 			zend = zsp->z_start + zsp->z_size;
2880 		}
2881 	}
2882 }
2883 
2884 static int check_zbc_access_params(struct scsi_cmnd *scp,
2885 			unsigned long long lba, unsigned int num, bool write)
2886 {
2887 	struct scsi_device *sdp = scp->device;
2888 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2889 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2890 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2891 
2892 	if (!write) {
2893 		if (devip->zmodel == BLK_ZONED_HA)
2894 			return 0;
2895 		/* For host-managed, reads cannot cross zone types boundaries */
2896 		if (zsp->z_type != zsp_end->z_type) {
2897 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2898 					LBA_OUT_OF_RANGE,
2899 					READ_INVDATA_ASCQ);
2900 			return check_condition_result;
2901 		}
2902 		return 0;
2903 	}
2904 
2905 	/* Writing into a gap zone is not allowed */
2906 	if (zbc_zone_is_gap(zsp)) {
2907 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2908 				ATTEMPT_ACCESS_GAP);
2909 		return check_condition_result;
2910 	}
2911 
2912 	/* No restrictions for writes within conventional zones */
2913 	if (zbc_zone_is_conv(zsp)) {
2914 		if (!zbc_zone_is_conv(zsp_end)) {
2915 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2916 					LBA_OUT_OF_RANGE,
2917 					WRITE_BOUNDARY_ASCQ);
2918 			return check_condition_result;
2919 		}
2920 		return 0;
2921 	}
2922 
2923 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2924 		/* Writes cannot cross sequential zone boundaries */
2925 		if (zsp_end != zsp) {
2926 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2927 					LBA_OUT_OF_RANGE,
2928 					WRITE_BOUNDARY_ASCQ);
2929 			return check_condition_result;
2930 		}
2931 		/* Cannot write full zones */
2932 		if (zsp->z_cond == ZC5_FULL) {
2933 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2934 					INVALID_FIELD_IN_CDB, 0);
2935 			return check_condition_result;
2936 		}
2937 		/* Writes must be aligned to the zone WP */
2938 		if (lba != zsp->z_wp) {
2939 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2940 					LBA_OUT_OF_RANGE,
2941 					UNALIGNED_WRITE_ASCQ);
2942 			return check_condition_result;
2943 		}
2944 	}
2945 
2946 	/* Handle implicit open of closed and empty zones */
2947 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2948 		if (devip->max_open &&
2949 		    devip->nr_exp_open >= devip->max_open) {
2950 			mk_sense_buffer(scp, DATA_PROTECT,
2951 					INSUFF_RES_ASC,
2952 					INSUFF_ZONE_ASCQ);
2953 			return check_condition_result;
2954 		}
2955 		zbc_open_zone(devip, zsp, false);
2956 	}
2957 
2958 	return 0;
2959 }
2960 
2961 static inline int check_device_access_params
2962 			(struct scsi_cmnd *scp, unsigned long long lba,
2963 			 unsigned int num, bool write)
2964 {
2965 	struct scsi_device *sdp = scp->device;
2966 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2967 
2968 	if (lba + num > sdebug_capacity) {
2969 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2970 		return check_condition_result;
2971 	}
2972 	/* transfer length excessive (tie in to block limits VPD page) */
2973 	if (num > sdebug_store_sectors) {
2974 		/* needs work to find which cdb byte 'num' comes from */
2975 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2976 		return check_condition_result;
2977 	}
2978 	if (write && unlikely(sdebug_wp)) {
2979 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2980 		return check_condition_result;
2981 	}
2982 	if (sdebug_dev_is_zoned(devip))
2983 		return check_zbc_access_params(scp, lba, num, write);
2984 
2985 	return 0;
2986 }
2987 
2988 /*
2989  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2990  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2991  * that access any of the "stores" in struct sdeb_store_info should call this
2992  * function with bug_if_fake_rw set to true.
2993  */
2994 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2995 						bool bug_if_fake_rw)
2996 {
2997 	if (sdebug_fake_rw) {
2998 		BUG_ON(bug_if_fake_rw);	/* See note above */
2999 		return NULL;
3000 	}
3001 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3002 }
3003 
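/*
 * The backing store may be smaller than the advertised capacity (see
 * virtual_gb), in which case accesses wrap modulo sdebug_store_sectors.
 * For example, with sdebug_store_sectors == 1024 an access at lba 1010
 * for num == 32 yields block == 1010 and rest == 18: 14 sectors are
 * copied at the end of the store and the remaining 18 from its start.
 * do_device_access() and comp_write_worker() below both follow this
 * split.
 */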
3004 /* Returns number of bytes copied or -1 if error. */
3005 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3006 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3007 {
3008 	int ret;
3009 	u64 block, rest = 0;
3010 	enum dma_data_direction dir;
3011 	struct scsi_data_buffer *sdb = &scp->sdb;
3012 	u8 *fsp;
3013 
3014 	if (do_write) {
3015 		dir = DMA_TO_DEVICE;
3016 		write_since_sync = true;
3017 	} else {
3018 		dir = DMA_FROM_DEVICE;
3019 	}
3020 
3021 	if (!sdb->length || !sip)
3022 		return 0;
3023 	if (scp->sc_data_direction != dir)
3024 		return -1;
3025 	fsp = sip->storep;
3026 
3027 	block = do_div(lba, sdebug_store_sectors);
3028 	if (block + num > sdebug_store_sectors)
3029 		rest = block + num - sdebug_store_sectors;
3030 
3031 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3032 		   fsp + (block * sdebug_sector_size),
3033 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3034 	if (ret != (num - rest) * sdebug_sector_size)
3035 		return ret;
3036 
3037 	if (rest) {
3038 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3039 			    fsp, rest * sdebug_sector_size,
3040 			    sg_skip + ((num - rest) * sdebug_sector_size),
3041 			    do_write);
3042 	}
3043 
3044 	return ret;
3045 }
3046 
3047 /* Returns number of bytes copied or -1 if error. */
3048 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3049 {
3050 	struct scsi_data_buffer *sdb = &scp->sdb;
3051 
3052 	if (!sdb->length)
3053 		return 0;
3054 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3055 		return -1;
3056 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3057 			      num * sdebug_sector_size, 0, true);
3058 }
3059 
3060 /* If the store at lba matches the first num blocks of arr, copy the
3061  * second num blocks of arr into the store at lba and return true. If
3062  * the comparison fails then return false (and write nothing). */
3063 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3064 			      const u8 *arr, bool compare_only)
3065 {
3066 	bool res;
3067 	u64 block, rest = 0;
3068 	u32 store_blks = sdebug_store_sectors;
3069 	u32 lb_size = sdebug_sector_size;
3070 	u8 *fsp = sip->storep;
3071 
3072 	block = do_div(lba, store_blks);
3073 	if (block + num > store_blks)
3074 		rest = block + num - store_blks;
3075 
3076 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3077 	if (!res)
3078 		return res;
3079 	if (rest)
3080 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3081 			      rest * lb_size);	/* wrapped tail must match too */
3082 	if (!res)
3083 		return res;
3084 	if (compare_only)
3085 		return true;
3086 	arr += num * lb_size;
3087 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3088 	if (rest)
3089 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3090 	return res;
3091 }
3092 
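/*
 * T10 protection information (PI) helpers. Each 8 byte tuple holds a
 * 2 byte guard (CRC16-T10DIF, or an IP checksum when sdebug_guard is
 * set), a 2 byte application tag and a 4 byte reference tag. An
 * application tag of 0xffff is the escape value: PI checking is
 * skipped for that sector (see prot_verify_read() below).
 */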
3093 static __be16 dif_compute_csum(const void *buf, int len)
3094 {
3095 	__be16 csum;
3096 
3097 	if (sdebug_guard)
3098 		csum = (__force __be16)ip_compute_csum(buf, len);
3099 	else
3100 		csum = cpu_to_be16(crc_t10dif(buf, len));
3101 
3102 	return csum;
3103 }
3104 
3105 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3106 		      sector_t sector, u32 ei_lba)
3107 {
3108 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3109 
3110 	if (sdt->guard_tag != csum) {
3111 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3112 			(unsigned long)sector,
3113 			be16_to_cpu(sdt->guard_tag),
3114 			be16_to_cpu(csum));
3115 		return 0x01;
3116 	}
3117 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3118 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3119 		pr_err("REF check failed on sector %lu\n",
3120 			(unsigned long)sector);
3121 		return 0x03;
3122 	}
3123 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3124 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3125 		pr_err("REF check failed on sector %lu\n",
3126 			(unsigned long)sector);
3127 		return 0x03;
3128 	}
3129 	return 0;
3130 }
3131 
3132 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3133 			  unsigned int sectors, bool read)
3134 {
3135 	size_t resid;
3136 	void *paddr;
3137 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3138 						scp->device->hostdata, true);
3139 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3140 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3141 	struct sg_mapping_iter miter;
3142 
3143 	/* Bytes of protection data to copy into sgl */
3144 	resid = sectors * sizeof(*dif_storep);
3145 
3146 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3147 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3148 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3149 
3150 	while (sg_miter_next(&miter) && resid > 0) {
3151 		size_t len = min_t(size_t, miter.length, resid);
3152 		void *start = dif_store(sip, sector);
3153 		size_t rest = 0;
3154 
3155 		if (dif_store_end < start + len)
3156 			rest = start + len - dif_store_end;
3157 
3158 		paddr = miter.addr;
3159 
3160 		if (read)
3161 			memcpy(paddr, start, len - rest);
3162 		else
3163 			memcpy(start, paddr, len - rest);
3164 
3165 		if (rest) {
3166 			if (read)
3167 				memcpy(paddr + len - rest, dif_storep, rest);
3168 			else
3169 				memcpy(dif_storep, paddr + len - rest, rest);
3170 		}
3171 
3172 		sector += len / sizeof(*dif_storep);
3173 		resid -= len;
3174 	}
3175 	sg_miter_stop(&miter);
3176 }
3177 
3178 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3179 			    unsigned int sectors, u32 ei_lba)
3180 {
3181 	int ret = 0;
3182 	unsigned int i;
3183 	sector_t sector;
3184 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3185 						scp->device->hostdata, true);
3186 	struct t10_pi_tuple *sdt;
3187 
3188 	for (i = 0; i < sectors; i++, ei_lba++) {
3189 		sector = start_sec + i;
3190 		sdt = dif_store(sip, sector);
3191 
3192 		if (sdt->app_tag == cpu_to_be16(0xffff))
3193 			continue;
3194 
3195 		/*
3196 		 * Because scsi_debug acts as both initiator and
3197 		 * target we proceed to verify the PI even if
3198 		 * RDPROTECT=3. This is done so the "initiator" knows
3199 		 * which type of error to return. Otherwise we would
3200 		 * have to iterate over the PI twice.
3201 		 */
3202 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3203 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3204 					 sector, ei_lba);
3205 			if (ret) {
3206 				dif_errors++;
3207 				break;
3208 			}
3209 		}
3210 	}
3211 
3212 	dif_copy_prot(scp, start_sec, sectors, true);
3213 	dix_reads++;
3214 
3215 	return ret;
3216 }
3217 
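/*
 * When sdebug_no_rwlock is set, the store is accessed without taking
 * macc_lck. The __acquire()/__release() annotations compile to no-ops
 * but keep sparse's context (lock balance) checking consistent.
 */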
3218 static inline void
3219 sdeb_read_lock(struct sdeb_store_info *sip)
3220 {
3221 	if (sdebug_no_rwlock) {
3222 		if (sip)
3223 			__acquire(&sip->macc_lck);
3224 		else
3225 			__acquire(&sdeb_fake_rw_lck);
3226 	} else {
3227 		if (sip)
3228 			read_lock(&sip->macc_lck);
3229 		else
3230 			read_lock(&sdeb_fake_rw_lck);
3231 	}
3232 }
3233 
3234 static inline void
3235 sdeb_read_unlock(struct sdeb_store_info *sip)
3236 {
3237 	if (sdebug_no_rwlock) {
3238 		if (sip)
3239 			__release(&sip->macc_lck);
3240 		else
3241 			__release(&sdeb_fake_rw_lck);
3242 	} else {
3243 		if (sip)
3244 			read_unlock(&sip->macc_lck);
3245 		else
3246 			read_unlock(&sdeb_fake_rw_lck);
3247 	}
3248 }
3249 
3250 static inline void
3251 sdeb_write_lock(struct sdeb_store_info *sip)
3252 {
3253 	if (sdebug_no_rwlock) {
3254 		if (sip)
3255 			__acquire(&sip->macc_lck);
3256 		else
3257 			__acquire(&sdeb_fake_rw_lck);
3258 	} else {
3259 		if (sip)
3260 			write_lock(&sip->macc_lck);
3261 		else
3262 			write_lock(&sdeb_fake_rw_lck);
3263 	}
3264 }
3265 
3266 static inline void
3267 sdeb_write_unlock(struct sdeb_store_info *sip)
3268 {
3269 	if (sdebug_no_rwlock) {
3270 		if (sip)
3271 			__release(&sip->macc_lck);
3272 		else
3273 			__release(&sdeb_fake_rw_lck);
3274 	} else {
3275 		if (sip)
3276 			write_unlock(&sip->macc_lck);
3277 		else
3278 			write_unlock(&sdeb_fake_rw_lck);
3279 	}
3280 }
3281 
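/*
 * READ command response. The flow is: decode the CDB variant, apply
 * the optional protection and error injection checks, validate the
 * LBA range, then copy from the store into the command's
 * scatter-gather list under the read lock.
 */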
3282 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3283 {
3284 	bool check_prot;
3285 	u32 num;
3286 	u32 ei_lba;
3287 	int ret;
3288 	u64 lba;
3289 	struct sdeb_store_info *sip = devip2sip(devip, true);
3290 	u8 *cmd = scp->cmnd;
3291 
3292 	switch (cmd[0]) {
3293 	case READ_16:
3294 		ei_lba = 0;
3295 		lba = get_unaligned_be64(cmd + 2);
3296 		num = get_unaligned_be32(cmd + 10);
3297 		check_prot = true;
3298 		break;
3299 	case READ_10:
3300 		ei_lba = 0;
3301 		lba = get_unaligned_be32(cmd + 2);
3302 		num = get_unaligned_be16(cmd + 7);
3303 		check_prot = true;
3304 		break;
3305 	case READ_6:
3306 		ei_lba = 0;
3307 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3308 		      (u32)(cmd[1] & 0x1f) << 16;
3309 		num = (0 == cmd[4]) ? 256 : cmd[4];
3310 		check_prot = true;
3311 		break;
3312 	case READ_12:
3313 		ei_lba = 0;
3314 		lba = get_unaligned_be32(cmd + 2);
3315 		num = get_unaligned_be32(cmd + 6);
3316 		check_prot = true;
3317 		break;
3318 	case XDWRITEREAD_10:
3319 		ei_lba = 0;
3320 		lba = get_unaligned_be32(cmd + 2);
3321 		num = get_unaligned_be16(cmd + 7);
3322 		check_prot = false;
3323 		break;
3324 	default:	/* assume READ(32) */
3325 		lba = get_unaligned_be64(cmd + 12);
3326 		ei_lba = get_unaligned_be32(cmd + 20);
3327 		num = get_unaligned_be32(cmd + 28);
3328 		check_prot = false;
3329 		break;
3330 	}
3331 	if (unlikely(have_dif_prot && check_prot)) {
3332 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3333 		    (cmd[1] & 0xe0)) {
3334 			mk_sense_invalid_opcode(scp);
3335 			return check_condition_result;
3336 		}
3337 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3338 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3339 		    (cmd[1] & 0xe0) == 0)
3340 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3341 				    "to DIF device\n");
3342 	}
3343 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3344 		     atomic_read(&sdeb_inject_pending))) {
3345 		num /= 2;
3346 		atomic_set(&sdeb_inject_pending, 0);
3347 	}
3348 
3349 	ret = check_device_access_params(scp, lba, num, false);
3350 	if (ret)
3351 		return ret;
3352 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3353 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3354 		     ((lba + num) > sdebug_medium_error_start))) {
3355 		/* claim unrecoverable read error */
3356 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3357 		/* set info field and valid bit for fixed descriptor */
3358 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3359 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3360 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3361 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3362 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3363 		}
3364 		scsi_set_resid(scp, scsi_bufflen(scp));
3365 		return check_condition_result;
3366 	}
3367 
3368 	sdeb_read_lock(sip);
3369 
3370 	/* DIX + T10 DIF */
3371 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3372 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3373 		case 1: /* Guard tag error */
3374 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3375 				sdeb_read_unlock(sip);
3376 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3377 				return check_condition_result;
3378 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3379 				sdeb_read_unlock(sip);
3380 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3381 				return illegal_condition_result;
3382 			}
3383 			break;
3384 		case 3: /* Reference tag error */
3385 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3386 				sdeb_read_unlock(sip);
3387 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3388 				return check_condition_result;
3389 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3390 				sdeb_read_unlock(sip);
3391 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3392 				return illegal_condition_result;
3393 			}
3394 			break;
3395 		}
3396 	}
3397 
3398 	ret = do_device_access(sip, scp, 0, lba, num, false);
3399 	sdeb_read_unlock(sip);
3400 	if (unlikely(ret == -1))
3401 		return DID_ERROR << 16;
3402 
3403 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3404 
3405 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3406 		     atomic_read(&sdeb_inject_pending))) {
3407 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3408 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3409 			atomic_set(&sdeb_inject_pending, 0);
3410 			return check_condition_result;
3411 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3412 			/* Logical block guard check failed */
3413 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3414 			atomic_set(&sdeb_inject_pending, 0);
3415 			return illegal_condition_result;
3416 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3417 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3418 			atomic_set(&sdeb_inject_pending, 0);
3419 			return illegal_condition_result;
3420 		}
3421 	}
3422 	return 0;
3423 }
3424 
3425 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3426 			     unsigned int sectors, u32 ei_lba)
3427 {
3428 	int ret;
3429 	struct t10_pi_tuple *sdt;
3430 	void *daddr;
3431 	sector_t sector = start_sec;
3432 	int ppage_offset;
3433 	int dpage_offset;
3434 	struct sg_mapping_iter diter;
3435 	struct sg_mapping_iter piter;
3436 
3437 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3438 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3439 
3440 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3441 			scsi_prot_sg_count(SCpnt),
3442 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3443 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3444 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3445 
3446 	/* For each protection page */
3447 	while (sg_miter_next(&piter)) {
3448 		dpage_offset = 0;
3449 		if (WARN_ON(!sg_miter_next(&diter))) {
3450 			ret = 0x01;
3451 			goto out;
3452 		}
3453 
3454 		for (ppage_offset = 0; ppage_offset < piter.length;
3455 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3456 			/* If we're at the end of the current
3457 			 * data page, advance to the next one
3458 			 */
3459 			if (dpage_offset >= diter.length) {
3460 				if (WARN_ON(!sg_miter_next(&diter))) {
3461 					ret = 0x01;
3462 					goto out;
3463 				}
3464 				dpage_offset = 0;
3465 			}
3466 
3467 			sdt = piter.addr + ppage_offset;
3468 			daddr = diter.addr + dpage_offset;
3469 
3470 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3471 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3472 				if (ret)
3473 					goto out;
3474 			}
3475 
3476 			sector++;
3477 			ei_lba++;
3478 			dpage_offset += sdebug_sector_size;
3479 		}
3480 		diter.consumed = dpage_offset;
3481 		sg_miter_stop(&diter);
3482 	}
3483 	sg_miter_stop(&piter);
3484 
3485 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3486 	dix_writes++;
3487 
3488 	return 0;
3489 
3490 out:
3491 	dif_errors++;
3492 	sg_miter_stop(&diter);
3493 	sg_miter_stop(&piter);
3494 	return ret;
3495 }
3496 
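/*
 * Map an LBA to a bit index in the provisioning map, honouring the
 * unmap granularity and alignment. For example, with granularity 4
 * and alignment 1, LBA 0 falls in (short) chunk 0 while LBAs 1 to 4
 * map to chunk 1: lba_to_map_index(1) == (1 + 4 - 1) / 4 == 1.
 */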
3497 static unsigned long lba_to_map_index(sector_t lba)
3498 {
3499 	if (sdebug_unmap_alignment)
3500 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3501 	sector_div(lba, sdebug_unmap_granularity);
3502 	return lba;
3503 }
3504 
3505 static sector_t map_index_to_lba(unsigned long index)
3506 {
3507 	sector_t lba = index * sdebug_unmap_granularity;
3508 
3509 	if (sdebug_unmap_alignment)
3510 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3511 	return lba;
3512 }
3513 
3514 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3515 			      unsigned int *num)
3516 {
3517 	sector_t end;
3518 	unsigned int mapped;
3519 	unsigned long index;
3520 	unsigned long next;
3521 
3522 	index = lba_to_map_index(lba);
3523 	mapped = test_bit(index, sip->map_storep);
3524 
3525 	if (mapped)
3526 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3527 	else
3528 		next = find_next_bit(sip->map_storep, map_size, index);
3529 
3530 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3531 	*num = end - lba;
3532 	return mapped;
3533 }
3534 
3535 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3536 		       unsigned int len)
3537 {
3538 	sector_t end = lba + len;
3539 
3540 	while (lba < end) {
3541 		unsigned long index = lba_to_map_index(lba);
3542 
3543 		if (index < map_size)
3544 			set_bit(index, sip->map_storep);
3545 
3546 		lba = map_index_to_lba(index + 1);
3547 	}
3548 }
3549 
3550 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3551 			 unsigned int len)
3552 {
3553 	sector_t end = lba + len;
3554 	u8 *fsp = sip->storep;
3555 
3556 	while (lba < end) {
3557 		unsigned long index = lba_to_map_index(lba);
3558 
3559 		if (lba == map_index_to_lba(index) &&
3560 		    lba + sdebug_unmap_granularity <= end &&
3561 		    index < map_size) {
3562 			clear_bit(index, sip->map_storep);
3563 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros; LBPRZ=2: 0xff bytes */
3564 				memset(fsp + lba * sdebug_sector_size,
3565 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3566 				       sdebug_sector_size *
3567 				       sdebug_unmap_granularity);
3568 			}
3569 			if (sip->dif_storep) {
3570 				memset(sip->dif_storep + lba, 0xff,
3571 				       sizeof(*sip->dif_storep) *
3572 				       sdebug_unmap_granularity);
3573 			}
3574 		}
3575 		lba = map_index_to_lba(index + 1);
3576 	}
3577 }
3578 
3579 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3580 {
3581 	bool check_prot;
3582 	u32 num;
3583 	u32 ei_lba;
3584 	int ret;
3585 	u64 lba;
3586 	struct sdeb_store_info *sip = devip2sip(devip, true);
3587 	u8 *cmd = scp->cmnd;
3588 
3589 	switch (cmd[0]) {
3590 	case WRITE_16:
3591 		ei_lba = 0;
3592 		lba = get_unaligned_be64(cmd + 2);
3593 		num = get_unaligned_be32(cmd + 10);
3594 		check_prot = true;
3595 		break;
3596 	case WRITE_10:
3597 		ei_lba = 0;
3598 		lba = get_unaligned_be32(cmd + 2);
3599 		num = get_unaligned_be16(cmd + 7);
3600 		check_prot = true;
3601 		break;
3602 	case WRITE_6:
3603 		ei_lba = 0;
3604 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3605 		      (u32)(cmd[1] & 0x1f) << 16;
3606 		num = (0 == cmd[4]) ? 256 : cmd[4];
3607 		check_prot = true;
3608 		break;
3609 	case WRITE_12:
3610 		ei_lba = 0;
3611 		lba = get_unaligned_be32(cmd + 2);
3612 		num = get_unaligned_be32(cmd + 6);
3613 		check_prot = true;
3614 		break;
3615 	case 0x53:	/* XDWRITEREAD(10) */
3616 		ei_lba = 0;
3617 		lba = get_unaligned_be32(cmd + 2);
3618 		num = get_unaligned_be16(cmd + 7);
3619 		check_prot = false;
3620 		break;
3621 	default:	/* assume WRITE(32) */
3622 		lba = get_unaligned_be64(cmd + 12);
3623 		ei_lba = get_unaligned_be32(cmd + 20);
3624 		num = get_unaligned_be32(cmd + 28);
3625 		check_prot = false;
3626 		break;
3627 	}
3628 	if (unlikely(have_dif_prot && check_prot)) {
3629 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3630 		    (cmd[1] & 0xe0)) {
3631 			mk_sense_invalid_opcode(scp);
3632 			return check_condition_result;
3633 		}
3634 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3635 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3636 		    (cmd[1] & 0xe0) == 0)
3637 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3638 				    "to DIF device\n");
3639 	}
3640 
3641 	sdeb_write_lock(sip);
3642 	ret = check_device_access_params(scp, lba, num, true);
3643 	if (ret) {
3644 		sdeb_write_unlock(sip);
3645 		return ret;
3646 	}
3647 
3648 	/* DIX + T10 DIF */
3649 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3650 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3651 		case 1: /* Guard tag error */
3652 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3653 				sdeb_write_unlock(sip);
3654 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3655 				return illegal_condition_result;
3656 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3657 				sdeb_write_unlock(sip);
3658 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3659 				return check_condition_result;
3660 			}
3661 			break;
3662 		case 3: /* Reference tag error */
3663 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3664 				sdeb_write_unlock(sip);
3665 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3666 				return illegal_condition_result;
3667 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3668 				sdeb_write_unlock(sip);
3669 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3670 				return check_condition_result;
3671 			}
3672 			break;
3673 		}
3674 	}
3675 
3676 	ret = do_device_access(sip, scp, 0, lba, num, true);
3677 	if (unlikely(scsi_debug_lbp()))
3678 		map_region(sip, lba, num);
3679 	/* If ZBC zone then bump its write pointer */
3680 	if (sdebug_dev_is_zoned(devip))
3681 		zbc_inc_wp(devip, lba, num);
3682 	sdeb_write_unlock(sip);
3683 	if (unlikely(-1 == ret))
3684 		return DID_ERROR << 16;
3685 	else if (unlikely(sdebug_verbose &&
3686 			  (ret < (num * sdebug_sector_size))))
3687 		sdev_printk(KERN_INFO, scp->device,
3688 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3689 			    my_name, num * sdebug_sector_size, ret);
3690 
3691 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3692 		     atomic_read(&sdeb_inject_pending))) {
3693 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3694 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3695 			atomic_set(&sdeb_inject_pending, 0);
3696 			return check_condition_result;
3697 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3698 			/* Logical block guard check failed */
3699 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3700 			atomic_set(&sdeb_inject_pending, 0);
3701 			return illegal_condition_result;
3702 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3703 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3704 			atomic_set(&sdeb_inject_pending, 0);
3705 			return illegal_condition_result;
3706 		}
3707 	}
3708 	return 0;
3709 }
3710 
3711 /*
3712  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3713  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3714  */
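/*
 * The data-out buffer begins with an "LB data offset" (lbdof) header
 * area of lbdof blocks: a 32 byte parameter list header followed by
 * num_lrd LBA range descriptors of 32 bytes each (LBA at byte 0,
 * number of logical blocks at byte 8). The data to be written follows
 * at byte offset lbdof * block_size, packed in descriptor order.
 */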
3715 static int resp_write_scat(struct scsi_cmnd *scp,
3716 			   struct sdebug_dev_info *devip)
3717 {
3718 	u8 *cmd = scp->cmnd;
3719 	u8 *lrdp = NULL;
3720 	u8 *up;
3721 	struct sdeb_store_info *sip = devip2sip(devip, true);
3722 	u8 wrprotect;
3723 	u16 lbdof, num_lrd, k;
3724 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3725 	u32 lb_size = sdebug_sector_size;
3726 	u32 ei_lba;
3727 	u64 lba;
3728 	int ret, res;
3729 	bool is_16;
3730 	static const u32 lrd_size = 32; /* + parameter list header size */
3731 
3732 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3733 		is_16 = false;
3734 		wrprotect = (cmd[10] >> 5) & 0x7;
3735 		lbdof = get_unaligned_be16(cmd + 12);
3736 		num_lrd = get_unaligned_be16(cmd + 16);
3737 		bt_len = get_unaligned_be32(cmd + 28);
3738 	} else {        /* that leaves WRITE SCATTERED(16) */
3739 		is_16 = true;
3740 		wrprotect = (cmd[2] >> 5) & 0x7;
3741 		lbdof = get_unaligned_be16(cmd + 4);
3742 		num_lrd = get_unaligned_be16(cmd + 8);
3743 		bt_len = get_unaligned_be32(cmd + 10);
3744 		if (unlikely(have_dif_prot)) {
3745 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3746 			    wrprotect) {
3747 				mk_sense_invalid_opcode(scp);
3748 				return illegal_condition_result;
3749 			}
3750 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3751 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3752 			     wrprotect == 0)
3753 				sdev_printk(KERN_ERR, scp->device,
3754 					    "Unprotected WR to DIF device\n");
3755 		}
3756 	}
3757 	if ((num_lrd == 0) || (bt_len == 0))
3758 		return 0;       /* T10 says these do-nothings are not errors */
3759 	if (lbdof == 0) {
3760 		if (sdebug_verbose)
3761 			sdev_printk(KERN_INFO, scp->device,
3762 				"%s: %s: LB Data Offset field bad\n",
3763 				my_name, __func__);
3764 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3765 		return illegal_condition_result;
3766 	}
3767 	lbdof_blen = lbdof * lb_size;
3768 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3769 		if (sdebug_verbose)
3770 			sdev_printk(KERN_INFO, scp->device,
3771 				"%s: %s: LBA range descriptors don't fit\n",
3772 				my_name, __func__);
3773 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3774 		return illegal_condition_result;
3775 	}
3776 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3777 	if (lrdp == NULL)
3778 		return SCSI_MLQUEUE_HOST_BUSY;
3779 	if (sdebug_verbose)
3780 		sdev_printk(KERN_INFO, scp->device,
3781 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3782 			my_name, __func__, lbdof_blen);
3783 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3784 	if (res == -1) {
3785 		ret = DID_ERROR << 16;
3786 		goto err_out;
3787 	}
3788 
3789 	sdeb_write_lock(sip);
3790 	sg_off = lbdof_blen;
3791 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3792 	cum_lb = 0;
3793 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3794 		lba = get_unaligned_be64(up + 0);
3795 		num = get_unaligned_be32(up + 8);
3796 		if (sdebug_verbose)
3797 			sdev_printk(KERN_INFO, scp->device,
3798 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3799 				my_name, __func__, k, lba, num, sg_off);
3800 		if (num == 0)
3801 			continue;
3802 		ret = check_device_access_params(scp, lba, num, true);
3803 		if (ret)
3804 			goto err_out_unlock;
3805 		num_by = num * lb_size;
3806 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3807 
3808 		if ((cum_lb + num) > bt_len) {
3809 			if (sdebug_verbose)
3810 				sdev_printk(KERN_INFO, scp->device,
3811 				    "%s: %s: sum of blocks > data provided\n",
3812 				    my_name, __func__);
3813 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3814 					0);
3815 			ret = illegal_condition_result;
3816 			goto err_out_unlock;
3817 		}
3818 
3819 		/* DIX + T10 DIF */
3820 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3821 			int prot_ret = prot_verify_write(scp, lba, num,
3822 							 ei_lba);
3823 
3824 			if (prot_ret) {
3825 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3826 						prot_ret);
3827 				ret = illegal_condition_result;
3828 				goto err_out_unlock;
3829 			}
3830 		}
3831 
3832 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3833 		/* If ZBC zone then bump its write pointer */
3834 		if (sdebug_dev_is_zoned(devip))
3835 			zbc_inc_wp(devip, lba, num);
3836 		if (unlikely(scsi_debug_lbp()))
3837 			map_region(sip, lba, num);
3838 		if (unlikely(-1 == ret)) {
3839 			ret = DID_ERROR << 16;
3840 			goto err_out_unlock;
3841 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3842 			sdev_printk(KERN_INFO, scp->device,
3843 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3844 			    my_name, num_by, ret);
3845 
3846 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3847 			     atomic_read(&sdeb_inject_pending))) {
3848 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3849 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3850 				atomic_set(&sdeb_inject_pending, 0);
3851 				ret = check_condition_result;
3852 				goto err_out_unlock;
3853 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3854 				/* Logical block guard check failed */
3855 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3856 				atomic_set(&sdeb_inject_pending, 0);
3857 				ret = illegal_condition_result;
3858 				goto err_out_unlock;
3859 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3860 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3861 				atomic_set(&sdeb_inject_pending, 0);
3862 				ret = illegal_condition_result;
3863 				goto err_out_unlock;
3864 			}
3865 		}
3866 		sg_off += num_by;
3867 		cum_lb += num;
3868 	}
3869 	ret = 0;
3870 err_out_unlock:
3871 	sdeb_write_unlock(sip);
3872 err_out:
3873 	kfree(lrdp);
3874 	return ret;
3875 }
3876 
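/*
 * WRITE SAME: fetch (or zero, when NDOB is set) a single logical
 * block, then replicate it across the range. With the UNMAP bit set
 * and logical block provisioning enabled, the range is deallocated
 * instead of written.
 */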
3877 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3878 			   u32 ei_lba, bool unmap, bool ndob)
3879 {
3880 	struct scsi_device *sdp = scp->device;
3881 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3882 	unsigned long long i;
3883 	u64 block, lbaa;
3884 	u32 lb_size = sdebug_sector_size;
3885 	int ret;
3886 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3887 						scp->device->hostdata, true);
3888 	u8 *fs1p;
3889 	u8 *fsp;
3890 
3891 	sdeb_write_lock(sip);
3892 
3893 	ret = check_device_access_params(scp, lba, num, true);
3894 	if (ret) {
3895 		sdeb_write_unlock(sip);
3896 		return ret;
3897 	}
3898 
3899 	if (unmap && scsi_debug_lbp()) {
3900 		unmap_region(sip, lba, num);
3901 		goto out;
3902 	}
3903 	lbaa = lba;
3904 	block = do_div(lbaa, sdebug_store_sectors);
3905 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3906 	fsp = sip->storep;
3907 	fs1p = fsp + (block * lb_size);
3908 	if (ndob) {
3909 		memset(fs1p, 0, lb_size);
3910 		ret = 0;
3911 	} else
3912 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3913 
3914 	if (-1 == ret) {
3915 		sdeb_write_unlock(sip);
3916 		return DID_ERROR << 16;
3917 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3918 		sdev_printk(KERN_INFO, scp->device,
3919 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3920 			    my_name, "write same", lb_size, ret);
3921 
3922 	/* Copy first sector to remaining blocks */
3923 	for (i = 1 ; i < num ; i++) {
3924 		lbaa = lba + i;
3925 		block = do_div(lbaa, sdebug_store_sectors);
3926 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3927 	}
3928 	if (scsi_debug_lbp())
3929 		map_region(sip, lba, num);
3930 	/* If ZBC zone then bump its write pointer */
3931 	if (sdebug_dev_is_zoned(devip))
3932 		zbc_inc_wp(devip, lba, num);
3933 out:
3934 	sdeb_write_unlock(sip);
3935 
3936 	return 0;
3937 }
3938 
3939 static int resp_write_same_10(struct scsi_cmnd *scp,
3940 			      struct sdebug_dev_info *devip)
3941 {
3942 	u8 *cmd = scp->cmnd;
3943 	u32 lba;
3944 	u16 num;
3945 	u32 ei_lba = 0;
3946 	bool unmap = false;
3947 
3948 	if (cmd[1] & 0x8) {
3949 		if (sdebug_lbpws10 == 0) {
3950 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3951 			return check_condition_result;
3952 		} else
3953 			unmap = true;
3954 	}
3955 	lba = get_unaligned_be32(cmd + 2);
3956 	num = get_unaligned_be16(cmd + 7);
3957 	if (num > sdebug_write_same_length) {
3958 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3959 		return check_condition_result;
3960 	}
3961 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3962 }
3963 
3964 static int resp_write_same_16(struct scsi_cmnd *scp,
3965 			      struct sdebug_dev_info *devip)
3966 {
3967 	u8 *cmd = scp->cmnd;
3968 	u64 lba;
3969 	u32 num;
3970 	u32 ei_lba = 0;
3971 	bool unmap = false;
3972 	bool ndob = false;
3973 
3974 	if (cmd[1] & 0x8) {	/* UNMAP */
3975 		if (sdebug_lbpws == 0) {
3976 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3977 			return check_condition_result;
3978 		} else
3979 			unmap = true;
3980 	}
3981 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3982 		ndob = true;
3983 	lba = get_unaligned_be64(cmd + 2);
3984 	num = get_unaligned_be32(cmd + 10);
3985 	if (num > sdebug_write_same_length) {
3986 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3987 		return check_condition_result;
3988 	}
3989 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3990 }
3991 
3992 /* Note the mode field is in the same position as the (lower) service action
3993  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3994  * each mode of this command should be reported separately; left for future work. */
3995 static int resp_write_buffer(struct scsi_cmnd *scp,
3996 			     struct sdebug_dev_info *devip)
3997 {
3998 	u8 *cmd = scp->cmnd;
3999 	struct scsi_device *sdp = scp->device;
4000 	struct sdebug_dev_info *dp;
4001 	u8 mode;
4002 
4003 	mode = cmd[1] & 0x1f;
4004 	switch (mode) {
4005 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4006 		/* set UAs on this device only */
4007 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4008 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4009 		break;
4010 	case 0x5:	/* download MC, save and ACT */
4011 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4012 		break;
4013 	case 0x6:	/* download MC with offsets and ACT */
4014 		/* set UAs on most devices (LUs) in this target */
4015 		list_for_each_entry(dp,
4016 				    &devip->sdbg_host->dev_info_list,
4017 				    dev_list)
4018 			if (dp->target == sdp->id) {
4019 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4020 				if (devip != dp)
4021 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4022 						dp->uas_bm);
4023 			}
4024 		break;
4025 	case 0x7:	/* download MC with offsets, save, and ACT */
4026 		/* set UA on all devices (LUs) in this target */
4027 		list_for_each_entry(dp,
4028 				    &devip->sdbg_host->dev_info_list,
4029 				    dev_list)
4030 			if (dp->target == sdp->id)
4031 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4032 					dp->uas_bm);
4033 		break;
4034 	default:
4035 		/* do nothing for this command for other mode values */
4036 		break;
4037 	}
4038 	return 0;
4039 }
4040 
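/*
 * COMPARE AND WRITE: the data-out buffer carries 2 * num blocks, the
 * verify data followed by the write data. The store is only updated
 * (and the region mapped) if the verify half matches the store.
 */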
4041 static int resp_comp_write(struct scsi_cmnd *scp,
4042 			   struct sdebug_dev_info *devip)
4043 {
4044 	u8 *cmd = scp->cmnd;
4045 	u8 *arr;
4046 	struct sdeb_store_info *sip = devip2sip(devip, true);
4047 	u64 lba;
4048 	u32 dnum;
4049 	u32 lb_size = sdebug_sector_size;
4050 	u8 num;
4051 	int ret;
4052 	int retval = 0;
4053 
4054 	lba = get_unaligned_be64(cmd + 2);
4055 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4056 	if (0 == num)
4057 		return 0;	/* degenerate case, not an error */
4058 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4059 	    (cmd[1] & 0xe0)) {
4060 		mk_sense_invalid_opcode(scp);
4061 		return check_condition_result;
4062 	}
4063 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4064 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4065 	    (cmd[1] & 0xe0) == 0)
4066 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4067 			    "to DIF device\n");
4068 	ret = check_device_access_params(scp, lba, num, false);
4069 	if (ret)
4070 		return ret;
4071 	dnum = 2 * num;
4072 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4073 	if (NULL == arr) {
4074 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4075 				INSUFF_RES_ASCQ);
4076 		return check_condition_result;
4077 	}
4078 
4079 	sdeb_write_lock(sip);
4080 
4081 	ret = do_dout_fetch(scp, dnum, arr);
4082 	if (ret == -1) {
4083 		retval = DID_ERROR << 16;
4084 		goto cleanup;
4085 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4086 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4087 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4088 			    dnum * lb_size, ret);
4089 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4090 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4091 		retval = check_condition_result;
4092 		goto cleanup;
4093 	}
4094 	if (scsi_debug_lbp())
4095 		map_region(sip, lba, num);
4096 cleanup:
4097 	sdeb_write_unlock(sip);
4098 	kfree(arr);
4099 	return retval;
4100 }
4101 
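/*
 * UNMAP parameter list: an 8 byte header (data length at byte 0,
 * block descriptor data length at byte 2) followed by 16 byte block
 * descriptors with the layout below.
 */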
4102 struct unmap_block_desc {
4103 	__be64	lba;
4104 	__be32	blocks;
4105 	__be32	__reserved;
4106 };
4107 
4108 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4109 {
4110 	unsigned char *buf;
4111 	struct unmap_block_desc *desc;
4112 	struct sdeb_store_info *sip = devip2sip(devip, true);
4113 	unsigned int i, payload_len, descriptors;
4114 	int ret;
4115 
4116 	if (!scsi_debug_lbp())
4117 		return 0;	/* fib and say it's done */
4118 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4119 	BUG_ON(scsi_bufflen(scp) != payload_len);
4120 
4121 	descriptors = (payload_len - 8) / 16;
4122 	if (descriptors > sdebug_unmap_max_desc) {
4123 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4124 		return check_condition_result;
4125 	}
4126 
4127 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4128 	if (!buf) {
4129 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4130 				INSUFF_RES_ASCQ);
4131 		return check_condition_result;
4132 	}
4133 
4134 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4135 
4136 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4137 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4138 
4139 	desc = (void *)&buf[8];
4140 
4141 	sdeb_write_lock(sip);
4142 
4143 	for (i = 0 ; i < descriptors ; i++) {
4144 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4145 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4146 
4147 		ret = check_device_access_params(scp, lba, num, true);
4148 		if (ret)
4149 			goto out;
4150 
4151 		unmap_region(sip, lba, num);
4152 	}
4153 
4154 	ret = 0;
4155 
4156 out:
4157 	sdeb_write_unlock(sip);
4158 	kfree(buf);
4159 
4160 	return ret;
4161 }
4162 
4163 #define SDEBUG_GET_LBA_STATUS_LEN 32
4164 
4165 static int resp_get_lba_status(struct scsi_cmnd *scp,
4166 			       struct sdebug_dev_info *devip)
4167 {
4168 	u8 *cmd = scp->cmnd;
4169 	u64 lba;
4170 	u32 alloc_len, mapped, num;
4171 	int ret;
4172 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4173 
4174 	lba = get_unaligned_be64(cmd + 2);
4175 	alloc_len = get_unaligned_be32(cmd + 10);
4176 
4177 	if (alloc_len < 24)
4178 		return 0;
4179 
4180 	ret = check_device_access_params(scp, lba, 1, false);
4181 	if (ret)
4182 		return ret;
4183 
4184 	if (scsi_debug_lbp()) {
4185 		struct sdeb_store_info *sip = devip2sip(devip, true);
4186 
4187 		mapped = map_state(sip, lba, &num);
4188 	} else {
4189 		mapped = 1;
4190 		/* following just in case virtual_gb changed */
4191 		sdebug_capacity = get_sdebug_capacity();
4192 		if (sdebug_capacity - lba <= 0xffffffff)
4193 			num = sdebug_capacity - lba;
4194 		else
4195 			num = 0xffffffff;
4196 	}
4197 
4198 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4199 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4200 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4201 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4202 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4203 
4204 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4205 }
4206 
4207 static int resp_sync_cache(struct scsi_cmnd *scp,
4208 			   struct sdebug_dev_info *devip)
4209 {
4210 	int res = 0;
4211 	u64 lba;
4212 	u32 num_blocks;
4213 	u8 *cmd = scp->cmnd;
4214 
4215 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4216 		lba = get_unaligned_be32(cmd + 2);
4217 		num_blocks = get_unaligned_be16(cmd + 7);
4218 	} else {				/* SYNCHRONIZE_CACHE(16) */
4219 		lba = get_unaligned_be64(cmd + 2);
4220 		num_blocks = get_unaligned_be32(cmd + 10);
4221 	}
4222 	if (lba + num_blocks > sdebug_capacity) {
4223 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4224 		return check_condition_result;
4225 	}
4226 	if (!write_since_sync || (cmd[1] & 0x2))
4227 		res = SDEG_RES_IMMED_MASK;
4228 	else		/* delay if write_since_sync and IMMED clear */
4229 		write_since_sync = false;
4230 	return res;
4231 }
4232 
4233 /*
4234  * Assuming LBA+num_blocks is not out-of-range, this function returns
4235  * CONDITION MET if the specified blocks will fit (or have already been
4236  * brought) into the cache, and GOOD status otherwise. We model a disk
4237  * with a cache big enough to always yield CONDITION MET, and actually
4238  * try to bring the range of main memory into the cache of the CPU(s).
4239  */
4240 static int resp_pre_fetch(struct scsi_cmnd *scp,
4241 			  struct sdebug_dev_info *devip)
4242 {
4243 	int res = 0;
4244 	u64 lba;
4245 	u64 block, rest = 0;
4246 	u32 nblks;
4247 	u8 *cmd = scp->cmnd;
4248 	struct sdeb_store_info *sip = devip2sip(devip, true);
4249 	u8 *fsp = sip->storep;
4250 
4251 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4252 		lba = get_unaligned_be32(cmd + 2);
4253 		nblks = get_unaligned_be16(cmd + 7);
4254 	} else {			/* PRE-FETCH(16) */
4255 		lba = get_unaligned_be64(cmd + 2);
4256 		nblks = get_unaligned_be32(cmd + 10);
4257 	}
4258 	if (lba + nblks > sdebug_capacity) {
4259 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4260 		return check_condition_result;
4261 	}
4262 	if (!fsp)
4263 		goto fini;
4264 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4265 	block = do_div(lba, sdebug_store_sectors);
4266 	if (block + nblks > sdebug_store_sectors)
4267 		rest = block + nblks - sdebug_store_sectors;
4268 
4269 	/* Try to bring the PRE-FETCH range into CPU's cache */
4270 	sdeb_read_lock(sip);
4271 	prefetch_range(fsp + (sdebug_sector_size * block),
4272 		       (nblks - rest) * sdebug_sector_size);
4273 	if (rest)
4274 		prefetch_range(fsp, rest * sdebug_sector_size);
4275 	sdeb_read_unlock(sip);
4276 fini:
4277 	if (cmd[1] & 0x2)
4278 		res = SDEG_RES_IMMED_MASK;
4279 	return res | condition_met_result;
4280 }
4281 
4282 #define RL_BUCKET_ELEMS 8
4283 
4284 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4285  * (W-LUN), the normal Linux scanning logic does not associate it with a
4286  * device (e.g. /dev/sg7). The following magic will make that association:
4287  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4288  * where <n> is a host number. If there are multiple targets in a host then
4289  * the above will associate a W-LUN with each target. To get a W-LUN
4290  * only for target 2, use "echo '- 2 49409' > scan" .
4291  */
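/*
 * The response is assembled in buckets of RL_BUCKET_ELEMS (8) LUN
 * entries; the first bucket gives up one slot to the 8 byte response
 * header, which conveniently is the same size as a struct scsi_lun.
 */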
4292 static int resp_report_luns(struct scsi_cmnd *scp,
4293 			    struct sdebug_dev_info *devip)
4294 {
4295 	unsigned char *cmd = scp->cmnd;
4296 	unsigned int alloc_len;
4297 	unsigned char select_report;
4298 	u64 lun;
4299 	struct scsi_lun *lun_p;
4300 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4301 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4302 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4303 	unsigned int tlun_cnt;	/* total LUN count */
4304 	unsigned int rlen;	/* response length (in bytes) */
4305 	int k, j, n, res;
4306 	unsigned int off_rsp = 0;
4307 	const int sz_lun = sizeof(struct scsi_lun);
4308 
4309 	clear_luns_changed_on_target(devip);
4310 
4311 	select_report = cmd[2];
4312 	alloc_len = get_unaligned_be32(cmd + 6);
4313 
4314 	if (alloc_len < 4) {
4315 		pr_err("alloc len too small %d\n", alloc_len);
4316 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4317 		return check_condition_result;
4318 	}
4319 
4320 	switch (select_report) {
4321 	case 0:		/* all LUNs apart from W-LUNs */
4322 		lun_cnt = sdebug_max_luns;
4323 		wlun_cnt = 0;
4324 		break;
4325 	case 1:		/* only W-LUNs */
4326 		lun_cnt = 0;
4327 		wlun_cnt = 1;
4328 		break;
4329 	case 2:		/* all LUNs */
4330 		lun_cnt = sdebug_max_luns;
4331 		wlun_cnt = 1;
4332 		break;
4333 	case 0x10:	/* only administrative LUs */
4334 	case 0x11:	/* see SPC-5 */
4335 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4336 	default:
4337 		pr_debug("select report invalid %d\n", select_report);
4338 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4339 		return check_condition_result;
4340 	}
4341 
4342 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4343 		--lun_cnt;
4344 
4345 	tlun_cnt = lun_cnt + wlun_cnt;
4346 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4347 	scsi_set_resid(scp, scsi_bufflen(scp));
4348 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4349 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4350 
4351 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4352 	lun = sdebug_no_lun_0 ? 1 : 0;
4353 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4354 		memset(arr, 0, sizeof(arr));
4355 		lun_p = (struct scsi_lun *)&arr[0];
4356 		if (k == 0) {
4357 			put_unaligned_be32(rlen, &arr[0]);
4358 			++lun_p;
4359 			j = 1;
4360 		}
4361 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4362 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4363 				break;
4364 			int_to_scsilun(lun++, lun_p);
4365 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4366 				lun_p->scsi_lun[0] |= 0x40;
4367 		}
4368 		if (j < RL_BUCKET_ELEMS)
4369 			break;
4370 		n = j * sz_lun;
4371 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4372 		if (res)
4373 			return res;
4374 		off_rsp += n;
4375 	}
4376 	if (wlun_cnt) {
4377 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4378 		++j;
4379 	}
4380 	if (j > 0)
4381 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4382 	return res;
4383 }
4384 
4385 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4386 {
4387 	bool is_bytchk3 = false;
4388 	u8 bytchk;
4389 	int ret, j;
4390 	u32 vnum, a_num, off;
4391 	const u32 lb_size = sdebug_sector_size;
4392 	u64 lba;
4393 	u8 *arr;
4394 	u8 *cmd = scp->cmnd;
4395 	struct sdeb_store_info *sip = devip2sip(devip, true);
4396 
4397 	bytchk = (cmd[1] >> 1) & 0x3;
4398 	if (bytchk == 0) {
4399 		return 0;	/* always claim internal verify okay */
4400 	} else if (bytchk == 2) {
4401 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4402 		return check_condition_result;
4403 	} else if (bytchk == 3) {
4404 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4405 	}
4406 	switch (cmd[0]) {
4407 	case VERIFY_16:
4408 		lba = get_unaligned_be64(cmd + 2);
4409 		vnum = get_unaligned_be32(cmd + 10);
4410 		break;
4411 	case VERIFY:		/* is VERIFY(10) */
4412 		lba = get_unaligned_be32(cmd + 2);
4413 		vnum = get_unaligned_be16(cmd + 7);
4414 		break;
4415 	default:
4416 		mk_sense_invalid_opcode(scp);
4417 		return check_condition_result;
4418 	}
4419 	if (vnum == 0)
4420 		return 0;	/* not an error */
4421 	a_num = is_bytchk3 ? 1 : vnum;
4422 	/* Treat following check like one for read (i.e. no write) access */
4423 	ret = check_device_access_params(scp, lba, a_num, false);
4424 	if (ret)
4425 		return ret;
4426 
4427 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4428 	if (!arr) {
4429 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4430 				INSUFF_RES_ASCQ);
4431 		return check_condition_result;
4432 	}
4433 	/* Not changing store, so only need read access */
4434 	sdeb_read_lock(sip);
4435 
4436 	ret = do_dout_fetch(scp, a_num, arr);
4437 	if (ret == -1) {
4438 		ret = DID_ERROR << 16;
4439 		goto cleanup;
4440 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4441 		sdev_printk(KERN_INFO, scp->device,
4442 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4443 			    my_name, __func__, a_num * lb_size, ret);
4444 	}
4445 	if (is_bytchk3) {
4446 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4447 			memcpy(arr + off, arr, lb_size);
4448 	}
4449 	ret = 0;
4450 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4451 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4452 		ret = check_condition_result;
4453 		goto cleanup;
4454 	}
4455 cleanup:
4456 	sdeb_read_unlock(sip);
4457 	kfree(arr);
4458 	return ret;
4459 }
4460 
4461 #define RZONES_DESC_HD 64
4462 
4463 /* Report zones depending on start LBA and reporting options */
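/*
 * Response format: a 64 byte header (zone list length, maximum LBA
 * and, when the zone capacity is less than the zone size, the zone
 * starting LBA granularity) followed by 64 byte zone descriptors,
 * filtered according to the reporting options field (cmd[14] & 0x3f).
 */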
4464 static int resp_report_zones(struct scsi_cmnd *scp,
4465 			     struct sdebug_dev_info *devip)
4466 {
4467 	unsigned int rep_max_zones, nrz = 0;
4468 	int ret = 0;
4469 	u32 alloc_len, rep_opts, rep_len;
4470 	bool partial;
4471 	u64 lba, zs_lba;
4472 	u8 *arr = NULL, *desc;
4473 	u8 *cmd = scp->cmnd;
4474 	struct sdeb_zone_state *zsp = NULL;
4475 	struct sdeb_store_info *sip = devip2sip(devip, false);
4476 
4477 	if (!sdebug_dev_is_zoned(devip)) {
4478 		mk_sense_invalid_opcode(scp);
4479 		return check_condition_result;
4480 	}
4481 	zs_lba = get_unaligned_be64(cmd + 2);
4482 	alloc_len = get_unaligned_be32(cmd + 10);
4483 	if (alloc_len == 0)
4484 		return 0;	/* not an error */
4485 	rep_opts = cmd[14] & 0x3f;
4486 	partial = cmd[14] & 0x80;
4487 
4488 	if (zs_lba >= sdebug_capacity) {
4489 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4490 		return check_condition_result;
4491 	}
4492 
4493 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4494 
4495 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4496 	if (!arr) {
4497 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4498 				INSUFF_RES_ASCQ);
4499 		return check_condition_result;
4500 	}
4501 
4502 	sdeb_read_lock(sip);
4503 
4504 	desc = arr + 64;
4505 	for (lba = zs_lba; lba < sdebug_capacity;
4506 	     lba = zsp->z_start + zsp->z_size) {
4507 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4508 			break;
4509 		zsp = zbc_zone(devip, lba);
4510 		switch (rep_opts) {
4511 		case 0x00:
4512 			/* All zones */
4513 			break;
4514 		case 0x01:
4515 			/* Empty zones */
4516 			if (zsp->z_cond != ZC1_EMPTY)
4517 				continue;
4518 			break;
4519 		case 0x02:
4520 			/* Implicit open zones */
4521 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4522 				continue;
4523 			break;
4524 		case 0x03:
4525 			/* Explicit open zones */
4526 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4527 				continue;
4528 			break;
4529 		case 0x04:
4530 			/* Closed zones */
4531 			if (zsp->z_cond != ZC4_CLOSED)
4532 				continue;
4533 			break;
4534 		case 0x05:
4535 			/* Full zones */
4536 			if (zsp->z_cond != ZC5_FULL)
4537 				continue;
4538 			break;
4539 		case 0x06:
4540 		case 0x07:
4541 		case 0x10:
4542 			/*
4543 			 * Read-only, offline, reset WP recommended are
4544 			 * not emulated: no zones to report;
4545 			 */
4546 			continue;
4547 		case 0x11:
4548 			/* non-seq-resource set */
4549 			if (!zsp->z_non_seq_resource)
4550 				continue;
4551 			break;
4552 		case 0x3e:
4553 			/* All zones except gap zones. */
4554 			if (zbc_zone_is_gap(zsp))
4555 				continue;
4556 			break;
4557 		case 0x3f:
4558 			/* Not write pointer (conventional) zones */
4559 			if (zbc_zone_is_seq(zsp))
4560 				continue;
4561 			break;
4562 		default:
4563 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4564 					INVALID_FIELD_IN_CDB, 0);
4565 			ret = check_condition_result;
4566 			goto fini;
4567 		}
4568 
4569 		if (nrz < rep_max_zones) {
4570 			/* Fill zone descriptor */
4571 			desc[0] = zsp->z_type;
4572 			desc[1] = zsp->z_cond << 4;
4573 			if (zsp->z_non_seq_resource)
4574 				desc[1] |= 1 << 1;
4575 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4576 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4577 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4578 			desc += 64;
4579 		}
4580 
4581 		if (partial && nrz >= rep_max_zones)
4582 			break;
4583 
4584 		nrz++;
4585 	}
4586 
4587 	/* Report header */
4588 	/* Zone list length. */
4589 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4590 	/* Maximum LBA */
4591 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4592 	/* Zone starting LBA granularity. */
4593 	if (devip->zcap < devip->zsize)
4594 		put_unaligned_be64(devip->zsize, arr + 16);
4595 
4596 	rep_len = (unsigned long)desc - (unsigned long)arr;
4597 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4598 
4599 fini:
4600 	sdeb_read_unlock(sip);
4601 	kfree(arr);
4602 	return ret;
4603 }
4604 
4605 /* Logic transplanted from tcmu-runner, file_zbc.c */
4606 static void zbc_open_all(struct sdebug_dev_info *devip)
4607 {
4608 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4609 	unsigned int i;
4610 
4611 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4612 		if (zsp->z_cond == ZC4_CLOSED)
4613 			zbc_open_zone(devip, &devip->zstate[i], true);
4614 	}
4615 }
4616 
4617 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4618 {
4619 	int res = 0;
4620 	u64 z_id;
4621 	enum sdebug_z_cond zc;
4622 	u8 *cmd = scp->cmnd;
4623 	struct sdeb_zone_state *zsp;
4624 	bool all = cmd[14] & 0x01;
4625 	struct sdeb_store_info *sip = devip2sip(devip, false);
4626 
4627 	if (!sdebug_dev_is_zoned(devip)) {
4628 		mk_sense_invalid_opcode(scp);
4629 		return check_condition_result;
4630 	}
4631 
4632 	sdeb_write_lock(sip);
4633 
4634 	if (all) {
4635 		/* Check if all closed zones can be open */
4636 		if (devip->max_open &&
4637 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4638 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4639 					INSUFF_ZONE_ASCQ);
4640 			res = check_condition_result;
4641 			goto fini;
4642 		}
4643 		/* Open all closed zones */
4644 		zbc_open_all(devip);
4645 		goto fini;
4646 	}
4647 
4648 	/* Open the specified zone */
4649 	z_id = get_unaligned_be64(cmd + 2);
4650 	if (z_id >= sdebug_capacity) {
4651 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4652 		res = check_condition_result;
4653 		goto fini;
4654 	}
4655 
4656 	zsp = zbc_zone(devip, z_id);
4657 	if (z_id != zsp->z_start) {
4658 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4659 		res = check_condition_result;
4660 		goto fini;
4661 	}
4662 	if (zbc_zone_is_conv(zsp)) {
4663 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 		res = check_condition_result;
4665 		goto fini;
4666 	}
4667 
4668 	zc = zsp->z_cond;
4669 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4670 		goto fini;
4671 
4672 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4673 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4674 				INSUFF_ZONE_ASCQ);
4675 		res = check_condition_result;
4676 		goto fini;
4677 	}
4678 
4679 	zbc_open_zone(devip, zsp, true);
4680 fini:
4681 	sdeb_write_unlock(sip);
4682 	return res;
4683 }
4684 
4685 static void zbc_close_all(struct sdebug_dev_info *devip)
4686 {
4687 	unsigned int i;
4688 
4689 	for (i = 0; i < devip->nr_zones; i++)
4690 		zbc_close_zone(devip, &devip->zstate[i]);
4691 }
4692 
4693 static int resp_close_zone(struct scsi_cmnd *scp,
4694 			   struct sdebug_dev_info *devip)
4695 {
4696 	int res = 0;
4697 	u64 z_id;
4698 	u8 *cmd = scp->cmnd;
4699 	struct sdeb_zone_state *zsp;
4700 	bool all = cmd[14] & 0x01;
4701 	struct sdeb_store_info *sip = devip2sip(devip, false);
4702 
4703 	if (!sdebug_dev_is_zoned(devip)) {
4704 		mk_sense_invalid_opcode(scp);
4705 		return check_condition_result;
4706 	}
4707 
4708 	sdeb_write_lock(sip);
4709 
4710 	if (all) {
4711 		zbc_close_all(devip);
4712 		goto fini;
4713 	}
4714 
4715 	/* Close specified zone */
4716 	z_id = get_unaligned_be64(cmd + 2);
4717 	if (z_id >= sdebug_capacity) {
4718 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4719 		res = check_condition_result;
4720 		goto fini;
4721 	}
4722 
4723 	zsp = zbc_zone(devip, z_id);
4724 	if (z_id != zsp->z_start) {
4725 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 		res = check_condition_result;
4727 		goto fini;
4728 	}
4729 	if (zbc_zone_is_conv(zsp)) {
4730 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731 		res = check_condition_result;
4732 		goto fini;
4733 	}
4734 
4735 	zbc_close_zone(devip, zsp);
4736 fini:
4737 	sdeb_write_unlock(sip);
4738 	return res;
4739 }
4740 
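/*
 * Transition a zone to ZC5_FULL, moving its write pointer to the end of
 * the zone. An EMPTY zone is only finished when @empty is true.
 */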
4741 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4742 			    struct sdeb_zone_state *zsp, bool empty)
4743 {
4744 	enum sdebug_z_cond zc = zsp->z_cond;
4745 
4746 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4747 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4748 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4749 			zbc_close_zone(devip, zsp);
4750 		if (zsp->z_cond == ZC4_CLOSED)
4751 			devip->nr_closed--;
4752 		zsp->z_wp = zsp->z_start + zsp->z_size;
4753 		zsp->z_cond = ZC5_FULL;
4754 	}
4755 }
4756 
4757 static void zbc_finish_all(struct sdebug_dev_info *devip)
4758 {
4759 	unsigned int i;
4760 
4761 	for (i = 0; i < devip->nr_zones; i++)
4762 		zbc_finish_zone(devip, &devip->zstate[i], false);
4763 }
4764 
4765 static int resp_finish_zone(struct scsi_cmnd *scp,
4766 			    struct sdebug_dev_info *devip)
4767 {
4768 	struct sdeb_zone_state *zsp;
4769 	int res = 0;
4770 	u64 z_id;
4771 	u8 *cmd = scp->cmnd;
4772 	bool all = cmd[14] & 0x01;
4773 	struct sdeb_store_info *sip = devip2sip(devip, false);
4774 
4775 	if (!sdebug_dev_is_zoned(devip)) {
4776 		mk_sense_invalid_opcode(scp);
4777 		return check_condition_result;
4778 	}
4779 
4780 	sdeb_write_lock(sip);
4781 
4782 	if (all) {
4783 		zbc_finish_all(devip);
4784 		goto fini;
4785 	}
4786 
4787 	/* Finish the specified zone */
4788 	z_id = get_unaligned_be64(cmd + 2);
4789 	if (z_id >= sdebug_capacity) {
4790 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4791 		res = check_condition_result;
4792 		goto fini;
4793 	}
4794 
4795 	zsp = zbc_zone(devip, z_id);
4796 	if (z_id != zsp->z_start) {
4797 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4798 		res = check_condition_result;
4799 		goto fini;
4800 	}
4801 	if (zbc_zone_is_conv(zsp)) {
4802 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4803 		res = check_condition_result;
4804 		goto fini;
4805 	}
4806 
4807 	zbc_finish_zone(devip, zsp, true);
4808 fini:
4809 	sdeb_write_unlock(sip);
4810 	return res;
4811 }
4812 
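/*
 * RESET WRITE POINTER on one zone: close it if open, zero its written
 * data in the backing store, and return it to ZC1_EMPTY.
 */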
4813 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4814 			 struct sdeb_zone_state *zsp)
4815 {
4816 	enum sdebug_z_cond zc;
4817 	struct sdeb_store_info *sip = devip2sip(devip, false);
4818 
4819 	if (!zbc_zone_is_seq(zsp))
4820 		return;
4821 
4822 	zc = zsp->z_cond;
4823 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4824 		zbc_close_zone(devip, zsp);
4825 
4826 	if (zsp->z_cond == ZC4_CLOSED)
4827 		devip->nr_closed--;
4828 
4829 	if (zsp->z_wp > zsp->z_start)
4830 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4831 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4832 
4833 	zsp->z_non_seq_resource = false;
4834 	zsp->z_wp = zsp->z_start;
4835 	zsp->z_cond = ZC1_EMPTY;
4836 }
4837 
4838 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4839 {
4840 	unsigned int i;
4841 
4842 	for (i = 0; i < devip->nr_zones; i++)
4843 		zbc_rwp_zone(devip, &devip->zstate[i]);
4844 }
4845 
4846 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4847 {
4848 	struct sdeb_zone_state *zsp;
4849 	int res = 0;
4850 	u64 z_id;
4851 	u8 *cmd = scp->cmnd;
4852 	bool all = cmd[14] & 0x01;
4853 	struct sdeb_store_info *sip = devip2sip(devip, false);
4854 
4855 	if (!sdebug_dev_is_zoned(devip)) {
4856 		mk_sense_invalid_opcode(scp);
4857 		return check_condition_result;
4858 	}
4859 
4860 	sdeb_write_lock(sip);
4861 
4862 	if (all) {
4863 		zbc_rwp_all(devip);
4864 		goto fini;
4865 	}
4866 
4867 	z_id = get_unaligned_be64(cmd + 2);
4868 	if (z_id >= sdebug_capacity) {
4869 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4870 		res = check_condition_result;
4871 		goto fini;
4872 	}
4873 
4874 	zsp = zbc_zone(devip, z_id);
4875 	if (z_id != zsp->z_start) {
4876 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4877 		res = check_condition_result;
4878 		goto fini;
4879 	}
4880 	if (zbc_zone_is_conv(zsp)) {
4881 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4882 		res = check_condition_result;
4883 		goto fini;
4884 	}
4885 
4886 	zbc_rwp_zone(devip, zsp);
4887 fini:
4888 	sdeb_write_unlock(sip);
4889 	return res;
4890 }
4891 
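/* Unique tag encoding both the hw queue index and the per-queue tag. */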
4892 static u32 get_tag(struct scsi_cmnd *cmnd)
4893 {
4894 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4895 }
4896 
4897 /* Queued (deferred) command completions converge here. */
4898 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4899 {
4900 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4901 	unsigned long flags;
4902 	struct scsi_cmnd *scp = sqcp->scmd;
4903 	struct sdebug_scsi_cmd *sdsc;
4904 	bool aborted;
4905 
4906 	if (sdebug_statistics) {
4907 		atomic_inc(&sdebug_completions);
4908 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4909 			atomic_inc(&sdebug_miss_cpus);
4910 	}
4911 
4912 	if (!scp) {
4913 		pr_err("scmd=NULL\n");
4914 		goto out;
4915 	}
4916 
4917 	sdsc = scsi_cmd_priv(scp);
4918 	spin_lock_irqsave(&sdsc->lock, flags);
4919 	aborted = sd_dp->aborted;
4920 	if (unlikely(aborted))
4921 		sd_dp->aborted = false;
4922 	ASSIGN_QUEUED_CMD(scp, NULL);
4923 
4924 	spin_unlock_irqrestore(&sdsc->lock, flags);
4925 
4926 	if (aborted) {
4927 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4928 		blk_abort_request(scsi_cmd_to_rq(scp));
4929 		goto out;
4930 	}
4931 
4932 	scsi_done(scp); /* callback to mid level */
4933 out:
4934 	sdebug_free_queued_cmd(sqcp);
4935 }
4936 
4937 /* When high resolution timer goes off this function is called. */
4938 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4939 {
4940 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4941 						  hrt);
4942 	sdebug_q_cmd_complete(sd_dp);
4943 	return HRTIMER_NORESTART;
4944 }
4945 
4946 /* When work queue schedules work, it calls this function. */
4947 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4948 {
4949 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4950 						  ew.work);
4951 	sdebug_q_cmd_complete(sd_dp);
4952 }
4953 
4954 static bool got_shared_uuid;
4955 static uuid_t shared_uuid;
4956 
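/* True if zstart lies on a zone boundary (a multiple of the zone size). */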
4957 static bool sdebug_is_zone_start(struct sdebug_dev_info *devip, u64 zstart)
4958 {
4959 	u32 remainder;
4960 
4961 	div_u64_rem(zstart, devip->zsize, &remainder);
4962 	return remainder == 0;
4963 }
4964 
4965 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4966 {
4967 	struct sdeb_zone_state *zsp;
4968 	sector_t capacity = get_sdebug_capacity();
4969 	sector_t conv_capacity;
4970 	sector_t zstart = 0;
4971 	unsigned int i;
4972 
4973 	/*
4974 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4975 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4976 	 * use the specified zone size checking that at least 2 zones can be
4977 	 * created for the device.
4978 	 */
4979 	if (!sdeb_zbc_zone_size_mb) {
4980 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4981 			>> ilog2(sdebug_sector_size);
4982 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4983 			devip->zsize >>= 1;
4984 		if (devip->zsize < 2) {
4985 			pr_err("Device capacity too small\n");
4986 			return -EINVAL;
4987 		}
4988 	} else {
4989 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4990 			>> ilog2(sdebug_sector_size);
4991 		if (devip->zsize >= capacity) {
4992 			pr_err("Zone size too large for device capacity\n");
4993 			return -EINVAL;
4994 		}
4995 	}
4996 
4997 	devip->nr_zones = div_u64(capacity + devip->zsize - 1, devip->zsize);
4998 
4999 	if (sdeb_zbc_zone_cap_mb == 0) {
5000 		devip->zcap = devip->zsize;
5001 	} else {
5002 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5003 			      ilog2(sdebug_sector_size);
5004 		if (devip->zcap > devip->zsize) {
5005 			pr_err("Zone capacity too large\n");
5006 			return -EINVAL;
5007 		}
5008 	}
5009 
5010 	conv_capacity = (sector_t)sdeb_zbc_nr_conv * devip->zsize;
5011 	if (conv_capacity >= capacity) {
5012 		pr_err("Number of conventional zones too large\n");
5013 		return -EINVAL;
5014 	}
5015 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5016 	devip->nr_seq_zones = div_u64(capacity - conv_capacity +
5017 				      devip->zsize - 1, devip->zsize);
5018 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5019 
5020 	/* Add gap zones if zone capacity is smaller than the zone size */
5021 	if (devip->zcap < devip->zsize)
5022 		devip->nr_zones += devip->nr_seq_zones;
5023 
5024 	if (devip->zmodel == BLK_ZONED_HM) {
5025 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5026 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5027 			devip->max_open = (devip->nr_zones - 1) / 2;
5028 		else
5029 			devip->max_open = sdeb_zbc_max_open;
5030 	}
5031 
5032 	devip->zstate = kcalloc(devip->nr_zones,
5033 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5034 	if (!devip->zstate)
5035 		return -ENOMEM;
5036 
5037 	for (i = 0; i < devip->nr_zones; i++) {
5038 		zsp = &devip->zstate[i];
5039 
5040 		zsp->z_start = zstart;
5041 
5042 		if (i < devip->nr_conv_zones) {
5043 			zsp->z_type = ZBC_ZTYPE_CNV;
5044 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5045 			zsp->z_wp = (sector_t)-1;
5046 			zsp->z_size =
5047 				min_t(u64, devip->zsize, capacity - zstart);
5048 		} else if (sdebug_is_zone_start(devip, zstart)) {
5049 			if (devip->zmodel == BLK_ZONED_HM)
5050 				zsp->z_type = ZBC_ZTYPE_SWR;
5051 			else
5052 				zsp->z_type = ZBC_ZTYPE_SWP;
5053 			zsp->z_cond = ZC1_EMPTY;
5054 			zsp->z_wp = zsp->z_start;
5055 			zsp->z_size =
5056 				min_t(u64, devip->zcap, capacity - zstart);
5057 		} else {
5058 			zsp->z_type = ZBC_ZTYPE_GAP;
5059 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5060 			zsp->z_wp = (sector_t)-1;
5061 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5062 					    capacity - zstart);
5063 		}
5064 
5065 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5066 		zstart += zsp->z_size;
5067 	}
5068 
5069 	return 0;
5070 }
5071 
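/*
 * Allocate and initialize a per-LUN device structure. Generates the LU
 * name UUID according to uuid_ctl and creates the zone map when a ZBC
 * model is in use.
 */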
5072 static struct sdebug_dev_info *sdebug_device_create(
5073 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5074 {
5075 	struct sdebug_dev_info *devip;
5076 
5077 	devip = kzalloc(sizeof(*devip), flags);
5078 	if (devip) {
5079 		if (sdebug_uuid_ctl == 1)
5080 			uuid_gen(&devip->lu_name);
5081 		else if (sdebug_uuid_ctl == 2) {
5082 			if (got_shared_uuid)
5083 				devip->lu_name = shared_uuid;
5084 			else {
5085 				uuid_gen(&shared_uuid);
5086 				got_shared_uuid = true;
5087 				devip->lu_name = shared_uuid;
5088 			}
5089 		}
5090 		devip->sdbg_host = sdbg_host;
5091 		if (sdeb_zbc_in_use) {
5092 			devip->zmodel = sdeb_zbc_model;
5093 			if (sdebug_device_create_zones(devip)) {
5094 				kfree(devip);
5095 				return NULL;
5096 			}
5097 		} else {
5098 			devip->zmodel = BLK_ZONED_NONE;
5099 		}
5100 		devip->create_ts = ktime_get_boottime();
5101 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5102 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5103 	}
5104 	return devip;
5105 }
5106 
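/*
 * Return the devip matching sdev's <channel, id, lun>; otherwise reuse a
 * free slot or, failing that, allocate a new one.
 */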
5107 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5108 {
5109 	struct sdebug_host_info *sdbg_host;
5110 	struct sdebug_dev_info *open_devip = NULL;
5111 	struct sdebug_dev_info *devip;
5112 
5113 	sdbg_host = shost_to_sdebug_host(sdev->host);
5114 
5115 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5116 		if ((devip->used) && (devip->channel == sdev->channel) &&
5117 		    (devip->target == sdev->id) &&
5118 		    (devip->lun == sdev->lun))
5119 			return devip;
5120 		else {
5121 			if ((!devip->used) && (!open_devip))
5122 				open_devip = devip;
5123 		}
5124 	}
5125 	if (!open_devip) { /* try and make a new one */
5126 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5127 		if (!open_devip) {
5128 			pr_err("out of memory at line %d\n", __LINE__);
5129 			return NULL;
5130 		}
5131 	}
5132 
5133 	open_devip->channel = sdev->channel;
5134 	open_devip->target = sdev->id;
5135 	open_devip->lun = sdev->lun;
5136 	open_devip->sdbg_host = sdbg_host;
5137 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5138 	open_devip->used = true;
5139 	return open_devip;
5140 }
5141 
5142 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5143 {
5144 	if (sdebug_verbose)
5145 		pr_info("slave_alloc <%u %u %u %llu>\n",
5146 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5147 	return 0;
5148 }
5149 
5150 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5151 {
5152 	struct sdebug_dev_info *devip =
5153 			(struct sdebug_dev_info *)sdp->hostdata;
5154 
5155 	if (sdebug_verbose)
5156 		pr_info("slave_configure <%u %u %u %llu>\n",
5157 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5158 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5159 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5160 	if (devip == NULL) {
5161 		devip = find_build_dev_info(sdp);
5162 		if (devip == NULL)
5163 			return 1;  /* no resources, will be marked offline */
5164 	}
5165 	sdp->hostdata = devip;
5166 	if (sdebug_no_uld)
5167 		sdp->no_uld_attach = 1;
5168 	config_cdb_len(sdp);
5169 	return 0;
5170 }
5171 
5172 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5173 {
5174 	struct sdebug_dev_info *devip =
5175 		(struct sdebug_dev_info *)sdp->hostdata;
5176 
5177 	if (sdebug_verbose)
5178 		pr_info("slave_destroy <%u %u %u %llu>\n",
5179 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5180 	if (devip) {
5181 		/* make this slot available for re-use */
5182 		devip->used = false;
5183 		sdp->hostdata = NULL;
5184 	}
5185 }
5186 
5187 /* Returns true if we require the queued memory to be freed by the caller. */
5188 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5189 			   enum sdeb_defer_type defer_t)
5190 {
5191 	if (defer_t == SDEB_DEFER_HRT) {
5192 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5193 
5194 		switch (res) {
5195 		case 0: /* Not active, it must have already run */
5196 		case -1: /* it's executing the callback */
5197 			return false;
5198 		case 1: /* Was active, we've now cancelled */
5199 		default:
5200 			return true;
5201 		}
5202 	} else if (defer_t == SDEB_DEFER_WQ) {
5203 		/* Cancel if pending */
5204 		if (cancel_work_sync(&sd_dp->ew.work))
5205 			return true;
5206 		/* Was not pending, so it must have run */
5207 		return false;
5208 	} else if (defer_t == SDEB_DEFER_POLL) {
5209 		return true;
5210 	}
5211 
5212 	return false;
5213 }
5214 
5215 
5216 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5217 {
5218 	enum sdeb_defer_type l_defer_t;
5219 	struct sdebug_defer *sd_dp;
5220 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5221 	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5222 
5223 	lockdep_assert_held(&sdsc->lock);
5224 
5225 	if (!sqcp)
5226 		return false;
5227 	sd_dp = &sqcp->sd_dp;
5228 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5229 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5230 
5231 	if (stop_qc_helper(sd_dp, l_defer_t))
5232 		sdebug_free_queued_cmd(sqcp);
5233 
5234 	return true;
5235 }
5236 
5237 /*
5238  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5239  */
5240 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5241 {
5242 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5243 	unsigned long flags;
5244 	bool res;
5245 
5246 	spin_lock_irqsave(&sdsc->lock, flags);
5247 	res = scsi_debug_stop_cmnd(cmnd);
5248 	spin_unlock_irqrestore(&sdsc->lock, flags);
5249 
5250 	return res;
5251 }
5252 
5253 /*
5254  * All we can do is set the cmnd as internally aborted and wait for it to
5255  * finish. We cannot call scsi_done() as normal completion path may do that.
5256  */
5257 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5258 {
5259 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5260 
5261 	return true;
5262 }
5263 
5264 /* Deletes (stops) timers or work queues of all queued commands */
5265 static void stop_all_queued(void)
5266 {
5267 	struct sdebug_host_info *sdhp;
5268 
5269 	mutex_lock(&sdebug_host_list_mutex);
5270 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5271 		struct Scsi_Host *shost = sdhp->shost;
5272 
5273 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5274 	}
5275 	mutex_unlock(&sdebug_host_list_mutex);
5276 }
5277 
5278 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5279 {
5280 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5281 
5282 	++num_aborts;
5283 
5284 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5285 		sdev_printk(KERN_INFO, SCpnt->device,
5286 			    "%s: command%s found\n", __func__,
5287 			    ok ? "" : " not");
5288 
5289 	return SUCCESS;
5290 }
5291 
5292 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5293 {
5294 	struct scsi_device *sdp = data;
5295 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5296 
5297 	if (scmd->device == sdp)
5298 		scsi_debug_abort_cmnd(scmd);
5299 
5300 	return true;
5301 }
5302 
5303 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5304 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5305 {
5306 	struct Scsi_Host *shost = sdp->host;
5307 
5308 	blk_mq_tagset_busy_iter(&shost->tag_set,
5309 				scsi_debug_stop_all_queued_iter, sdp);
5310 }
5311 
5312 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5313 {
5314 	struct scsi_device *sdp = SCpnt->device;
5315 	struct sdebug_dev_info *devip = sdp->hostdata;
5316 
5317 	++num_dev_resets;
5318 
5319 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5320 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5321 
5322 	scsi_debug_stop_all_queued(sdp);
5323 	if (devip)
5324 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5325 
5326 	return SUCCESS;
5327 }
5328 
5329 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5330 {
5331 	struct scsi_device *sdp = SCpnt->device;
5332 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5333 	struct sdebug_dev_info *devip;
5334 	int k = 0;
5335 
5336 	++num_target_resets;
5337 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5338 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5339 
5340 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5341 		if (devip->target == sdp->id) {
5342 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5343 			++k;
5344 		}
5345 	}
5346 
5347 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5348 		sdev_printk(KERN_INFO, sdp,
5349 			    "%s: %d device(s) found in target\n", __func__, k);
5350 
5351 	return SUCCESS;
5352 }
5353 
5354 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5355 {
5356 	struct scsi_device *sdp = SCpnt->device;
5357 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5358 	struct sdebug_dev_info *devip;
5359 	int k = 0;
5360 
5361 	++num_bus_resets;
5362 
5363 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5364 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5365 
5366 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5367 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5368 		++k;
5369 	}
5370 
5371 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5372 		sdev_printk(KERN_INFO, sdp,
5373 			    "%s: %d device(s) found in host\n", __func__, k);
5374 	return SUCCESS;
5375 }
5376 
5377 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5378 {
5379 	struct sdebug_host_info *sdbg_host;
5380 	struct sdebug_dev_info *devip;
5381 	int k = 0;
5382 
5383 	++num_host_resets;
5384 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5385 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5386 	mutex_lock(&sdebug_host_list_mutex);
5387 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5388 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5389 				    dev_list) {
5390 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5391 			++k;
5392 		}
5393 	}
5394 	mutex_unlock(&sdebug_host_list_mutex);
5395 	stop_all_queued();
5396 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5397 		sdev_printk(KERN_INFO, SCpnt->device,
5398 			    "%s: %d device(s) found\n", __func__, k);
5399 	return SUCCESS;
5400 }
5401 
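/* Write an MSDOS-style partition table into the first sector of the store. */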
5402 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5403 {
5404 	struct msdos_partition *pp;
5405 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5406 	int sectors_per_part, num_sectors, k;
5407 	int heads_by_sects, start_sec, end_sec;
5408 
5409 	/* assume partition table already zeroed */
5410 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5411 		return;
5412 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5413 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5414 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5415 	}
5416 	num_sectors = (int)get_sdebug_capacity();
5417 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5418 			   / sdebug_num_parts;
5419 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5420 	starts[0] = sdebug_sectors_per;
5421 	max_part_secs = sectors_per_part;
5422 	for (k = 1; k < sdebug_num_parts; ++k) {
5423 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5424 			    * heads_by_sects;
5425 		if (starts[k] - starts[k - 1] < max_part_secs)
5426 			max_part_secs = starts[k] - starts[k - 1];
5427 	}
5428 	starts[sdebug_num_parts] = num_sectors;
5429 	starts[sdebug_num_parts + 1] = 0;
5430 
5431 	ramp[510] = 0x55;	/* magic partition markings */
5432 	ramp[511] = 0xAA;
5433 	pp = (struct msdos_partition *)(ramp + 0x1be);
5434 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5435 		start_sec = starts[k];
5436 		end_sec = starts[k] + max_part_secs - 1;
5437 		pp->boot_ind = 0;
5438 
5439 		pp->cyl = start_sec / heads_by_sects;
5440 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5441 			   / sdebug_sectors_per;
5442 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5443 
5444 		pp->end_cyl = end_sec / heads_by_sects;
5445 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5446 			       / sdebug_sectors_per;
5447 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5448 
5449 		pp->start_sect = cpu_to_le32(start_sec);
5450 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5451 		pp->sys_ind = 0x83;	/* plain Linux partition */
5452 	}
5453 }
5454 
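/*
 * Block or unblock request queues on every sdebug host. The caller must
 * hold sdebug_host_list_mutex (asserted below).
 */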
5455 static void block_unblock_all_queues(bool block)
5456 {
5457 	struct sdebug_host_info *sdhp;
5458 
5459 	lockdep_assert_held(&sdebug_host_list_mutex);
5460 
5461 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5462 		struct Scsi_Host *shost = sdhp->shost;
5463 
5464 		if (block)
5465 			scsi_block_requests(shost);
5466 		else
5467 			scsi_unblock_requests(shost);
5468 	}
5469 }
5470 
5471 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5472  * commands will be processed normally before triggers occur.
5473  */
5474 static void tweak_cmnd_count(void)
5475 {
5476 	int count, modulo;
5477 
5478 	modulo = abs(sdebug_every_nth);
5479 	if (modulo < 2)
5480 		return;
5481 
5482 	mutex_lock(&sdebug_host_list_mutex);
5483 	block_unblock_all_queues(true);
5484 	count = atomic_read(&sdebug_cmnd_count);
5485 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5486 	block_unblock_all_queues(false);
5487 	mutex_unlock(&sdebug_host_list_mutex);
5488 }
5489 
5490 static void clear_queue_stats(void)
5491 {
5492 	atomic_set(&sdebug_cmnd_count, 0);
5493 	atomic_set(&sdebug_completions, 0);
5494 	atomic_set(&sdebug_miss_cpus, 0);
5495 	atomic_set(&sdebug_a_tsf, 0);
5496 }
5497 
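/* True when the command count hits a multiple of abs(every_nth). */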
5498 static bool inject_on_this_cmd(void)
5499 {
5500 	if (sdebug_every_nth == 0)
5501 		return false;
5502 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5503 }
5504 
5505 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5506 
5507 
5508 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5509 {
5510 	if (sqcp)
5511 		kmem_cache_free(queued_cmd_cache, sqcp);
5512 }
5513 
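/*
 * Allocate a queued command tracking structure and initialize its hrtimer
 * and work queue callbacks. Freed with sdebug_free_queued_cmd().
 */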
5514 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5515 {
5516 	struct sdebug_queued_cmd *sqcp;
5517 	struct sdebug_defer *sd_dp;
5518 
5519 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5520 	if (!sqcp)
5521 		return NULL;
5522 
5523 	sd_dp = &sqcp->sd_dp;
5524 
5525 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5526 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5527 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5528 
5529 	sqcp->scmd = scmd;
5530 
5531 	return sqcp;
5532 }
5533 
5534 /* Complete the processing of the thread that queued a SCSI command to this
5535  * driver. It either completes the command by calling scsi_done() or
5536  * schedules an hrtimer or work queue item and then returns 0. Returns
5537  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5538  */
5539 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5540 			 int scsi_result,
5541 			 int (*pfp)(struct scsi_cmnd *,
5542 				    struct sdebug_dev_info *),
5543 			 int delta_jiff, int ndelay)
5544 {
5545 	struct request *rq = scsi_cmd_to_rq(cmnd);
5546 	bool polled = rq->cmd_flags & REQ_POLLED;
5547 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5548 	unsigned long flags;
5549 	u64 ns_from_boot = 0;
5550 	struct sdebug_queued_cmd *sqcp;
5551 	struct scsi_device *sdp;
5552 	struct sdebug_defer *sd_dp;
5553 
5554 	if (unlikely(devip == NULL)) {
5555 		if (scsi_result == 0)
5556 			scsi_result = DID_NO_CONNECT << 16;
5557 		goto respond_in_thread;
5558 	}
5559 	sdp = cmnd->device;
5560 
5561 	if (delta_jiff == 0)
5562 		goto respond_in_thread;
5563 
5564 
5565 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5566 		     (scsi_result == 0))) {
5567 		int num_in_q = scsi_device_busy(sdp);
5568 		int qdepth = cmnd->device->queue_depth;
5569 
5570 		if ((num_in_q == qdepth) &&
5571 		    (atomic_inc_return(&sdebug_a_tsf) >=
5572 		     abs(sdebug_every_nth))) {
5573 			atomic_set(&sdebug_a_tsf, 0);
5574 			scsi_result = device_qfull_result;
5575 
5576 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5577 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5578 					    __func__, num_in_q);
5579 		}
5580 	}
5581 
5582 	sqcp = sdebug_alloc_queued_cmd(cmnd);
5583 	if (!sqcp) {
5584 		pr_err("%s no alloc\n", __func__);
5585 		return SCSI_MLQUEUE_HOST_BUSY;
5586 	}
5587 	sd_dp = &sqcp->sd_dp;
5588 
5589 	if (polled)
5590 		ns_from_boot = ktime_get_boottime_ns();
5591 
5592 	/* one of the resp_*() response functions is called here */
5593 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5594 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5595 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5596 		delta_jiff = ndelay = 0;
5597 	}
5598 	if (cmnd->result == 0 && scsi_result != 0)
5599 		cmnd->result = scsi_result;
5600 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5601 		if (atomic_read(&sdeb_inject_pending)) {
5602 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5603 			atomic_set(&sdeb_inject_pending, 0);
5604 			cmnd->result = check_condition_result;
5605 		}
5606 	}
5607 
5608 	if (unlikely(sdebug_verbose && cmnd->result))
5609 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5610 			    __func__, cmnd->result);
5611 
5612 	if (delta_jiff > 0 || ndelay > 0) {
5613 		ktime_t kt;
5614 
5615 		if (delta_jiff > 0) {
5616 			u64 ns = jiffies_to_nsecs(delta_jiff);
5617 
5618 			if (sdebug_random && ns < U32_MAX) {
5619 				ns = get_random_u32_below((u32)ns);
5620 			} else if (sdebug_random) {
5621 				ns >>= 12;	/* scale to 4 usec precision */
5622 				if (ns < U32_MAX)	/* over 4 hours max */
5623 					ns = get_random_u32_below((u32)ns);
5624 				ns <<= 12;
5625 			}
5626 			kt = ns_to_ktime(ns);
5627 		} else {	/* ndelay has a 4.2 second max */
5628 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5629 					     (u32)ndelay;
5630 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5631 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5632 
5633 				if (kt <= d) {	/* elapsed duration >= kt */
5634 					/* call scsi_done() from this thread */
5635 					sdebug_free_queued_cmd(sqcp);
5636 					scsi_done(cmnd);
5637 					return 0;
5638 				}
5639 				/* otherwise reduce kt by elapsed time */
5640 				kt -= d;
5641 			}
5642 		}
5643 		if (sdebug_statistics)
5644 			sd_dp->issuing_cpu = raw_smp_processor_id();
5645 		if (polled) {
5646 			spin_lock_irqsave(&sdsc->lock, flags);
5647 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5648 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5649 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5650 			spin_unlock_irqrestore(&sdsc->lock, flags);
5651 		} else {
5652 			/* schedule the invocation of scsi_done() for a later time */
5653 			spin_lock_irqsave(&sdsc->lock, flags);
5654 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5655 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5656 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5657 			/*
5658 			 * The completion handler will try to grab sdsc->lock,
5659 			 * so there is no chance that the completion handler
5660 			 * will call scsi_done() until we release the lock
5661 			 * here (so ok to keep referencing sdsc).
5662 			 */
5663 			spin_unlock_irqrestore(&sdsc->lock, flags);
5664 		}
5665 	} else {	/* jdelay < 0, use work queue */
5666 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5667 			     atomic_read(&sdeb_inject_pending))) {
5668 			sd_dp->aborted = true;
5669 			atomic_set(&sdeb_inject_pending, 0);
5670 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5671 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5672 		}
5673 
5674 		if (sdebug_statistics)
5675 			sd_dp->issuing_cpu = raw_smp_processor_id();
5676 		if (polled) {
5677 			spin_lock_irqsave(&sdsc->lock, flags);
5678 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5679 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5680 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5681 			spin_unlock_irqrestore(&sdsc->lock, flags);
5682 		} else {
5683 			spin_lock_irqsave(&sdsc->lock, flags);
5684 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5685 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5686 			schedule_work(&sd_dp->ew.work);
5687 			spin_unlock_irqrestore(&sdsc->lock, flags);
5688 		}
5689 	}
5690 
5691 	return 0;
5692 
5693 respond_in_thread:	/* call back to mid-layer using invocation thread */
5694 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5695 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5696 	if (cmnd->result == 0 && scsi_result != 0)
5697 		cmnd->result = scsi_result;
5698 	scsi_done(cmnd);
5699 	return 0;
5700 }
5701 
5702 /* Note: The following macros create attribute files in the
5703    /sys/module/scsi_debug/parameters directory. Unfortunately this
5704    driver is unaware of a change and cannot trigger auxiliary actions
5705    as it can when the corresponding attribute in the
5706    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5707  */
5708 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5709 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5710 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5711 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5712 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5713 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5714 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5715 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5716 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5717 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5718 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5719 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5720 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5721 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5722 module_param_string(inq_product, sdebug_inq_product_id,
5723 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5724 module_param_string(inq_rev, sdebug_inq_product_rev,
5725 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5726 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5727 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5728 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5729 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5730 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5731 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5732 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5733 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5734 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5735 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5736 module_param_named(max_segment_size, sdebug_max_segment_size, uint, S_IRUGO);
5737 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5738 		   S_IRUGO | S_IWUSR);
5739 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5740 		   S_IRUGO | S_IWUSR);
5741 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5742 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5743 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5744 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5745 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5746 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5747 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5748 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5749 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5750 module_param_named(per_host_store, sdebug_per_host_store, bool,
5751 		   S_IRUGO | S_IWUSR);
5752 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5753 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5754 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5755 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5756 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5757 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5758 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5759 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5760 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5761 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5762 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5763 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5764 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5765 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5766 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5767 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5768 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5769 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5770 		   S_IRUGO | S_IWUSR);
5771 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5772 module_param_named(write_same_length, sdebug_write_same_length, int,
5773 		   S_IRUGO | S_IWUSR);
5774 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5775 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5776 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5777 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5778 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5779 
5780 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5781 MODULE_DESCRIPTION("SCSI debug adapter driver");
5782 MODULE_LICENSE("GPL");
5783 MODULE_VERSION(SDEBUG_VERSION);
5784 
5785 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5786 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5787 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5788 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5789 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5790 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5791 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5792 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5793 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5794 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5795 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5796 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5797 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5798 MODULE_PARM_DESC(host_max_queue,
5799 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5800 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5801 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5802 		 SDEBUG_VERSION "\")");
5803 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5804 MODULE_PARM_DESC(lbprz,
5805 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5806 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5807 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5808 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5809 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5810 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5811 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5812 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5813 MODULE_PARM_DESC(max_segment_size, "max bytes in a single segment");
5814 MODULE_PARM_DESC(medium_error_count, "count of sectors on which to return MEDIUM error");
5815 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5816 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5817 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5818 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5819 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5820 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5821 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5822 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5823 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5824 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5825 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5826 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5827 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5828 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5829 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5830 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5831 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5832 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5833 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5834 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5835 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5836 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5837 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5838 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5839 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5840 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5841 MODULE_PARM_DESC(uuid_ctl,
5842 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5843 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5844 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5845 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5846 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5847 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5848 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5849 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5850 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5851 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
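/*
 * Example invocation (hypothetical values): create one 256 MiB
 * host-managed zoned disk with 32 MiB zones and a single conventional
 * zone:
 *
 *     modprobe scsi_debug dev_size_mb=256 zbc=managed \
 *              zone_size_mb=32 zone_nr_conv=1
 */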
5852 
5853 #define SDEBUG_INFO_LEN 256
5854 static char sdebug_info[SDEBUG_INFO_LEN];
5855 
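/* Build the adapter "info" string reported to the SCSI midlayer. */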
5856 static const char *scsi_debug_info(struct Scsi_Host *shp)
5857 {
5858 	int k;
5859 
5860 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5861 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5862 	if (k >= (SDEBUG_INFO_LEN - 1))
5863 		return sdebug_info;
5864 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5865 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5866 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5867 		  "statistics", (int)sdebug_statistics);
5868 	return sdebug_info;
5869 }
5870 
5871 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5872 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5873 				 int length)
5874 {
5875 	char arr[16];
5876 	int opts;
5877 	int minLen = length > 15 ? 15 : length;
5878 
5879 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5880 		return -EACCES;
5881 	memcpy(arr, buffer, minLen);
5882 	arr[minLen] = '\0';
5883 	if (1 != sscanf(arr, "%d", &opts))
5884 		return -EINVAL;
5885 	sdebug_opts = opts;
5886 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5887 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5888 	if (sdebug_every_nth != 0)
5889 		tweak_cmnd_count();
5890 	return length;
5891 }
5892 
5893 struct sdebug_submit_queue_data {
5894 	int *first;
5895 	int *last;
5896 	int queue_num;
5897 };
5898 
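/* Record the first (lowest) and last busy tag seen on the selected hw queue. */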
5899 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
5900 {
5901 	struct sdebug_submit_queue_data *data = opaque;
5902 	u32 unique_tag = blk_mq_unique_tag(rq);
5903 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
5904 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
5905 	int queue_num = data->queue_num;
5906 
5907 	if (hwq != queue_num)
5908 		return true;
5909 
5910 	/* Rely on iterating in ascending tag order */
5911 	if (*data->first == -1)
5912 		*data->first = *data->last = tag;
5913 	else
5914 		*data->last = tag;
5915 
5916 	return true;
5917 }
5918 
5919 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5920  * same for each scsi_debug host (if more than one). Some of the counters
5921  * output here are not atomic, so they may be inaccurate on a busy system. */
5922 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5923 {
5924 	struct sdebug_host_info *sdhp;
5925 	int j;
5926 
5927 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5928 		   SDEBUG_VERSION, sdebug_version_date);
5929 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5930 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5931 		   sdebug_opts, sdebug_every_nth);
5932 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5933 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5934 		   sdebug_sector_size, "bytes");
5935 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5936 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5937 		   num_aborts);
5938 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5939 		   num_dev_resets, num_target_resets, num_bus_resets,
5940 		   num_host_resets);
5941 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5942 		   dix_reads, dix_writes, dif_errors);
5943 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5944 		   sdebug_statistics);
5945 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5946 		   atomic_read(&sdebug_cmnd_count),
5947 		   atomic_read(&sdebug_completions),
5948 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5949 		   atomic_read(&sdebug_a_tsf),
5950 		   atomic_read(&sdeb_mq_poll_count));
5951 
5952 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5953 	for (j = 0; j < submit_queues; ++j) {
5954 		int f = -1, l = -1;
5955 		struct sdebug_submit_queue_data data = {
5956 			.queue_num = j,
5957 			.first = &f,
5958 			.last = &l,
5959 		};
5960 		seq_printf(m, "  queue %d:\n", j);
5961 		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
5962 					&data);
5963 		if (f >= 0) {
5964 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5965 				   "first,last bits", f, l);
5966 		}
5967 	}
5968 
5969 	seq_printf(m, "this host_no=%d\n", host->host_no);
5970 	if (!xa_empty(per_store_ap)) {
5971 		bool niu;
5972 		int idx;
5973 		unsigned long l_idx;
5974 		struct sdeb_store_info *sip;
5975 
5976 		seq_puts(m, "\nhost list:\n");
5977 		j = 0;
5978 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5979 			idx = sdhp->si_idx;
5980 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5981 				   sdhp->shost->host_no, idx);
5982 			++j;
5983 		}
5984 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5985 			   sdeb_most_recent_idx);
5986 		j = 0;
5987 		xa_for_each(per_store_ap, l_idx, sip) {
5988 			niu = xa_get_mark(per_store_ap, l_idx,
5989 					  SDEB_XA_NOT_IN_USE);
5990 			idx = (int)l_idx;
5991 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5992 				   (niu ? "  not_in_use" : ""));
5993 			++j;
5994 		}
5995 	}
5996 	return 0;
5997 }
5998 
5999 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6000 {
6001 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6002 }
6003 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6004  * of delay is jiffies.
6005  */
6006 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6007 			   size_t count)
6008 {
6009 	int jdelay, res;
6010 
6011 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6012 		res = count;
6013 		if (sdebug_jdelay != jdelay) {
6014 			struct sdebug_host_info *sdhp;
6015 
6016 			mutex_lock(&sdebug_host_list_mutex);
6017 			block_unblock_all_queues(true);
6018 
6019 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6020 				struct Scsi_Host *shost = sdhp->shost;
6021 
6022 				if (scsi_host_busy(shost)) {
6023 					res = -EBUSY;   /* queued commands */
6024 					break;
6025 				}
6026 			}
6027 			if (res > 0) {
6028 				sdebug_jdelay = jdelay;
6029 				sdebug_ndelay = 0;
6030 			}
6031 			block_unblock_all_queues(false);
6032 			mutex_unlock(&sdebug_host_list_mutex);
6033 		}
6034 		return res;
6035 	}
6036 	return -EINVAL;
6037 }
6038 static DRIVER_ATTR_RW(delay);
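/*
 * Example (hypothetical value): select a 2 jiffy response delay at
 * runtime via sysfs:
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */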
6039 
6040 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6041 {
6042 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6043 }
6044 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6045 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6046 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6047 			    size_t count)
6048 {
6049 	int ndelay, res;
6050 
6051 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6052 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6053 		res = count;
6054 		if (sdebug_ndelay != ndelay) {
6055 			struct sdebug_host_info *sdhp;
6056 
6057 			mutex_lock(&sdebug_host_list_mutex);
6058 			block_unblock_all_queues(true);
6059 
6060 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6061 				struct Scsi_Host *shost = sdhp->shost;
6062 
6063 				if (scsi_host_busy(shost)) {
6064 					res = -EBUSY;   /* queued commands */
6065 					break;
6066 				}
6067 			}
6068 
6069 			if (res > 0) {
6070 				sdebug_ndelay = ndelay;
6071 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6072 							: DEF_JDELAY;
6073 			}
6074 			block_unblock_all_queues(false);
6075 			mutex_unlock(&sdebug_host_list_mutex);
6076 		}
6077 		return res;
6078 	}
6079 	return -EINVAL;
6080 }
6081 static DRIVER_ATTR_RW(ndelay);
6082 
6083 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6084 {
6085 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6086 }
6087 
6088 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6089 			  size_t count)
6090 {
6091 	int opts;
6092 	char work[20];
6093 
6094 	if (sscanf(buf, "%10s", work) == 1) {
6095 		if (strncasecmp(work, "0x", 2) == 0) {
6096 			if (kstrtoint(work + 2, 16, &opts) == 0)
6097 				goto opts_done;
6098 		} else {
6099 			if (kstrtoint(work, 10, &opts) == 0)
6100 				goto opts_done;
6101 		}
6102 	}
6103 	return -EINVAL;
6104 opts_done:
6105 	sdebug_opts = opts;
6106 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6107 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6108 	tweak_cmnd_count();
6109 	return count;
6110 }
6111 static DRIVER_ATTR_RW(opts);
6112 
6113 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6114 {
6115 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6116 }
6117 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6118 			   size_t count)
6119 {
6120 	int n;
6121 
6122 	/* Cannot change from or to TYPE_ZBC with sysfs */
6123 	if (sdebug_ptype == TYPE_ZBC)
6124 		return -EINVAL;
6125 
6126 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6127 		if (n == TYPE_ZBC)
6128 			return -EINVAL;
6129 		sdebug_ptype = n;
6130 		return count;
6131 	}
6132 	return -EINVAL;
6133 }
6134 static DRIVER_ATTR_RW(ptype);
6135 
6136 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6137 {
6138 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6139 }
6140 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6141 			    size_t count)
6142 {
6143 	int n;
6144 
6145 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6146 		sdebug_dsense = n;
6147 		return count;
6148 	}
6149 	return -EINVAL;
6150 }
6151 static DRIVER_ATTR_RW(dsense);
6152 
6153 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6154 {
6155 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6156 }
6157 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6158 			     size_t count)
6159 {
6160 	int n, idx;
6161 
6162 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6163 		bool want_store = (n == 0);
6164 		struct sdebug_host_info *sdhp;
6165 
6166 		n = (n > 0);
6167 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6168 		if (sdebug_fake_rw == n)
6169 			return count;	/* not transitioning so do nothing */
6170 
6171 		if (want_store) {	/* 1 --> 0 transition, set up store */
6172 			if (sdeb_first_idx < 0) {
6173 				idx = sdebug_add_store();
6174 				if (idx < 0)
6175 					return idx;
6176 			} else {
6177 				idx = sdeb_first_idx;
6178 				xa_clear_mark(per_store_ap, idx,
6179 					      SDEB_XA_NOT_IN_USE);
6180 			}
6181 			/* make all hosts use same store */
6182 			list_for_each_entry(sdhp, &sdebug_host_list,
6183 					    host_list) {
6184 				if (sdhp->si_idx != idx) {
6185 					xa_set_mark(per_store_ap, sdhp->si_idx,
6186 						    SDEB_XA_NOT_IN_USE);
6187 					sdhp->si_idx = idx;
6188 				}
6189 			}
6190 			sdeb_most_recent_idx = idx;
6191 		} else {	/* 0 --> 1 transition is trigger for shrink */
6192 			sdebug_erase_all_stores(true /* apart from first */);
6193 		}
6194 		sdebug_fake_rw = n;
6195 		return count;
6196 	}
6197 	return -EINVAL;
6198 }
6199 static DRIVER_ATTR_RW(fake_rw);
6200 
6201 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6202 {
6203 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6204 }
6205 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6206 			      size_t count)
6207 {
6208 	int n;
6209 
6210 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6211 		sdebug_no_lun_0 = n;
6212 		return count;
6213 	}
6214 	return -EINVAL;
6215 }
6216 static DRIVER_ATTR_RW(no_lun_0);
6217 
6218 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6219 {
6220 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6221 }
6222 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6223 			      size_t count)
6224 {
6225 	int n;
6226 
6227 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228 		sdebug_num_tgts = n;
6229 		sdebug_max_tgts_luns();
6230 		return count;
6231 	}
6232 	return -EINVAL;
6233 }
6234 static DRIVER_ATTR_RW(num_tgts);
6235 
6236 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6237 {
6238 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6239 }
6240 static DRIVER_ATTR_RO(dev_size_mb);
6241 
6242 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6243 {
6244 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6245 }
6246 
6247 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6248 				    size_t count)
6249 {
6250 	bool v;
6251 
6252 	if (kstrtobool(buf, &v))
6253 		return -EINVAL;
6254 
6255 	sdebug_per_host_store = v;
6256 	return count;
6257 }
6258 static DRIVER_ATTR_RW(per_host_store);
6259 
6260 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6261 {
6262 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6263 }
6264 static DRIVER_ATTR_RO(num_parts);
6265 
6266 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6267 {
6268 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6269 }
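/*
 * Accepts a decimal value or, with a "0x" prefix, a hex value. A non-zero
 * every_nth forces sdebug_statistics on, since the error-injection logic
 * relies on the command counter.
 */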
6270 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6271 			       size_t count)
6272 {
6273 	int nth;
6274 	char work[20];
6275 
6276 	if (sscanf(buf, "%10s", work) == 1) {
6277 		if (strncasecmp(work, "0x", 2) == 0) {
6278 			if (kstrtoint(work + 2, 16, &nth) == 0)
6279 				goto every_nth_done;
6280 		} else {
6281 			if (kstrtoint(work, 10, &nth) == 0)
6282 				goto every_nth_done;
6283 		}
6284 	}
6285 	return -EINVAL;
6286 
6287 every_nth_done:
6288 	sdebug_every_nth = nth;
6289 	if (nth && !sdebug_statistics) {
6290 		pr_info("every_nth needs statistics=1, set it\n");
6291 		sdebug_statistics = true;
6292 	}
6293 	tweak_cmnd_count();
6294 	return count;
6295 }
6296 static DRIVER_ATTR_RW(every_nth);
6297 
6298 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6299 {
6300 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6301 }
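/*
 * Only the peripheral (0) and flat (1) SAM LUN address methods are
 * accepted. A change raises a LUNS_CHANGED unit attention on every device
 * when the emulated scsi_level is SPC-3 or later.
 */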
6302 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6303 				size_t count)
6304 {
6305 	int n;
6306 	bool changed;
6307 
6308 	if (kstrtoint(buf, 0, &n))
6309 		return -EINVAL;
6310 	if (n >= 0) {
6311 		if (n > (int)SAM_LUN_AM_FLAT) {
6312 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6313 			return -EINVAL;
6314 		}
6315 		changed = ((int)sdebug_lun_am != n);
6316 		sdebug_lun_am = n;
6317 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6318 			struct sdebug_host_info *sdhp;
6319 			struct sdebug_dev_info *dp;
6320 
6321 			mutex_lock(&sdebug_host_list_mutex);
6322 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6323 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6324 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6325 				}
6326 			}
6327 			mutex_unlock(&sdebug_host_list_mutex);
6328 		}
6329 		return count;
6330 	}
6331 	return -EINVAL;
6332 }
6333 static DRIVER_ATTR_RW(lun_format);
6334 
6335 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6336 {
6337 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6338 }
6339 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6340 			      size_t count)
6341 {
6342 	int n;
6343 	bool changed;
6344 
6345 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6346 		if (n > 256) {
6347 			pr_warn("max_luns can be no more than 256\n");
6348 			return -EINVAL;
6349 		}
6350 		changed = (sdebug_max_luns != n);
6351 		sdebug_max_luns = n;
6352 		sdebug_max_tgts_luns();
6353 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6354 			struct sdebug_host_info *sdhp;
6355 			struct sdebug_dev_info *dp;
6356 
6357 			mutex_lock(&sdebug_host_list_mutex);
6358 			list_for_each_entry(sdhp, &sdebug_host_list,
6359 					    host_list) {
6360 				list_for_each_entry(dp, &sdhp->dev_info_list,
6361 						    dev_list) {
6362 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6363 						dp->uas_bm);
6364 				}
6365 			}
6366 			mutex_unlock(&sdebug_host_list_mutex);
6367 		}
6368 		return count;
6369 	}
6370 	return -EINVAL;
6371 }
6372 static DRIVER_ATTR_RW(max_luns);
6373 
6374 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6375 {
6376 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6377 }
6378 /* N.B. max_queue may only be changed while no shosts are attached (else
6379  * -EBUSY is returned), and only when host_max_queue is 0. */
6380 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6381 			       size_t count)
6382 {
6383 	int n;
6384 
6385 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6386 	    (n <= SDEBUG_CANQUEUE) &&
6387 	    (sdebug_host_max_queue == 0)) {
6388 		mutex_lock(&sdebug_host_list_mutex);
6389 
6390 		/* We may only change sdebug_max_queue when we have no shosts */
6391 		if (list_empty(&sdebug_host_list))
6392 			sdebug_max_queue = n;
6393 		else
6394 			count = -EBUSY;
6395 		mutex_unlock(&sdebug_host_list_mutex);
6396 		return count;
6397 	}
6398 	return -EINVAL;
6399 }
6400 static DRIVER_ATTR_RW(max_queue);
6401 
6402 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6403 {
6404 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6405 }
6406 
6407 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6408 {
6409 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6410 }
6411 
6412 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6413 {
6414 	bool v;
6415 
6416 	if (kstrtobool(buf, &v))
6417 		return -EINVAL;
6418 
6419 	sdebug_no_rwlock = v;
6420 	return count;
6421 }
6422 static DRIVER_ATTR_RW(no_rwlock);
6423 
6424 /*
6425  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6426  * in range [0, sdebug_host_max_queue), we can't change it.
6427  */
6428 static DRIVER_ATTR_RO(host_max_queue);
6429 
6430 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6431 {
6432 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6433 }
6434 static DRIVER_ATTR_RO(no_uld);
6435 
6436 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6437 {
6438 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6439 }
6440 static DRIVER_ATTR_RO(scsi_level);
6441 
6442 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6443 {
6444 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6445 }
6446 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6447 				size_t count)
6448 {
6449 	int n;
6450 	bool changed;
6451 
6452 	/* Ignore capacity change for ZBC drives for now */
6453 	if (sdeb_zbc_in_use)
6454 		return -ENOTSUPP;
6455 
6456 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6457 		changed = (sdebug_virtual_gb != n);
6458 		sdebug_virtual_gb = n;
6459 		sdebug_capacity = get_sdebug_capacity();
6460 		if (changed) {
6461 			struct sdebug_host_info *sdhp;
6462 			struct sdebug_dev_info *dp;
6463 
6464 			mutex_lock(&sdebug_host_list_mutex);
6465 			list_for_each_entry(sdhp, &sdebug_host_list,
6466 					    host_list) {
6467 				list_for_each_entry(dp, &sdhp->dev_info_list,
6468 						    dev_list) {
6469 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6470 						dp->uas_bm);
6471 				}
6472 			}
6473 			mutex_unlock(&sdebug_host_list_mutex);
6474 		}
6475 		return count;
6476 	}
6477 	return -EINVAL;
6478 }
6479 static DRIVER_ATTR_RW(virtual_gb);
6480 
6481 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6482 {
6483 	/* absolute number of hosts currently active is what is shown */
6484 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6485 }
6486 
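/*
 * Writing a positive number adds that many hosts (re-using idle per-host
 * stores when per_host_store is active); a negative number removes that
 * many, most recently added first.
 */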
6487 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6488 			      size_t count)
6489 {
6490 	bool found;
6491 	unsigned long idx;
6492 	struct sdeb_store_info *sip;
6493 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6494 	int delta_hosts;
6495 
6496 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6497 		return -EINVAL;
6498 	if (delta_hosts > 0) {
6499 		do {
6500 			found = false;
6501 			if (want_phs) {
6502 				xa_for_each_marked(per_store_ap, idx, sip,
6503 						   SDEB_XA_NOT_IN_USE) {
6504 					sdeb_most_recent_idx = (int)idx;
6505 					found = true;
6506 					break;
6507 				}
6508 				if (found)	/* re-use case */
6509 					sdebug_add_host_helper((int)idx);
6510 				else
6511 					sdebug_do_add_host(true);
6512 			} else {
6513 				sdebug_do_add_host(false);
6514 			}
6515 		} while (--delta_hosts);
6516 	} else if (delta_hosts < 0) {
6517 		do {
6518 			sdebug_do_remove_host(false);
6519 		} while (++delta_hosts);
6520 	}
6521 	return count;
6522 }
6523 static DRIVER_ATTR_RW(add_host);
6524 
6525 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6526 {
6527 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6528 }
6529 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6530 				    size_t count)
6531 {
6532 	int n;
6533 
6534 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6535 		sdebug_vpd_use_hostno = n;
6536 		return count;
6537 	}
6538 	return -EINVAL;
6539 }
6540 static DRIVER_ATTR_RW(vpd_use_hostno);
6541 
6542 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6543 {
6544 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6545 }
6546 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6547 				size_t count)
6548 {
6549 	int n;
6550 
6551 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6552 		if (n > 0)
6553 			sdebug_statistics = true;
6554 		else {
6555 			clear_queue_stats();
6556 			sdebug_statistics = false;
6557 		}
6558 		return count;
6559 	}
6560 	return -EINVAL;
6561 }
6562 static DRIVER_ATTR_RW(statistics);
6563 
6564 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6565 {
6566 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6567 }
6568 static DRIVER_ATTR_RO(sector_size);
6569 
6570 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6571 {
6572 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6573 }
6574 static DRIVER_ATTR_RO(submit_queues);
6575 
6576 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6577 {
6578 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6579 }
6580 static DRIVER_ATTR_RO(dix);
6581 
6582 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6583 {
6584 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6585 }
6586 static DRIVER_ATTR_RO(dif);
6587 
6588 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6589 {
6590 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6591 }
6592 static DRIVER_ATTR_RO(guard);
6593 
6594 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6595 {
6596 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6597 }
6598 static DRIVER_ATTR_RO(ato);
6599 
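/*
 * Show the provisioning bitmap of the store at index 0 as a range list.
 * When logical block provisioning is off, report the whole range as mapped.
 */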
6600 static ssize_t map_show(struct device_driver *ddp, char *buf)
6601 {
6602 	ssize_t count = 0;
6603 
6604 	if (!scsi_debug_lbp())
6605 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6606 				 sdebug_store_sectors);
6607 
6608 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6609 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6610 
6611 		if (sip)
6612 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6613 					  (int)map_size, sip->map_storep);
6614 	}
6615 	buf[count++] = '\n';
6616 	buf[count] = '\0';
6617 
6618 	return count;
6619 }
6620 static DRIVER_ATTR_RO(map);
6621 
6622 static ssize_t random_show(struct device_driver *ddp, char *buf)
6623 {
6624 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6625 }
6626 
6627 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6628 			    size_t count)
6629 {
6630 	bool v;
6631 
6632 	if (kstrtobool(buf, &v))
6633 		return -EINVAL;
6634 
6635 	sdebug_random = v;
6636 	return count;
6637 }
6638 static DRIVER_ATTR_RW(random);
6639 
6640 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6641 {
6642 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6643 }
6644 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6645 			       size_t count)
6646 {
6647 	int n;
6648 
6649 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6650 		sdebug_removable = (n > 0);
6651 		return count;
6652 	}
6653 	return -EINVAL;
6654 }
6655 static DRIVER_ATTR_RW(removable);
6656 
6657 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6658 {
6659 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6660 }
6661 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6662 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6663 			       size_t count)
6664 {
6665 	int n;
6666 
6667 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6668 		sdebug_host_lock = (n > 0);
6669 		return count;
6670 	}
6671 	return -EINVAL;
6672 }
6673 static DRIVER_ATTR_RW(host_lock);
6674 
6675 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6676 {
6677 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6678 }
6679 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6680 			    size_t count)
6681 {
6682 	int n;
6683 
6684 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6685 		sdebug_strict = (n > 0);
6686 		return count;
6687 	}
6688 	return -EINVAL;
6689 }
6690 static DRIVER_ATTR_RW(strict);
6691 
6692 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6693 {
6694 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6695 }
6696 static DRIVER_ATTR_RO(uuid_ctl);
6697 
6698 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6699 {
6700 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6701 }
6702 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6703 			     size_t count)
6704 {
6705 	int ret, n;
6706 
6707 	ret = kstrtoint(buf, 0, &n);
6708 	if (ret)
6709 		return ret;
6710 	sdebug_cdb_len = n;
6711 	all_config_cdb_len();
6712 	return count;
6713 }
6714 static DRIVER_ATTR_RW(cdb_len);
6715 
6716 static const char * const zbc_model_strs_a[] = {
6717 	[BLK_ZONED_NONE] = "none",
6718 	[BLK_ZONED_HA]   = "host-aware",
6719 	[BLK_ZONED_HM]   = "host-managed",
6720 };
6721 
6722 static const char * const zbc_model_strs_b[] = {
6723 	[BLK_ZONED_NONE] = "no",
6724 	[BLK_ZONED_HA]   = "aware",
6725 	[BLK_ZONED_HM]   = "managed",
6726 };
6727 
6728 static const char * const zbc_model_strs_c[] = {
6729 	[BLK_ZONED_NONE] = "0",
6730 	[BLK_ZONED_HA]   = "1",
6731 	[BLK_ZONED_HM]   = "2",
6732 };
6733 
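/*
 * Map the zbc= parameter string to a BLK_ZONED_* value. Any spelling from
 * the three tables above is accepted; returns -EINVAL if none match.
 */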
6734 static int sdeb_zbc_model_str(const char *cp)
6735 {
6736 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6737 
6738 	if (res < 0) {
6739 		res = sysfs_match_string(zbc_model_strs_b, cp);
6740 		if (res < 0) {
6741 			res = sysfs_match_string(zbc_model_strs_c, cp);
6742 			if (res < 0)
6743 				return -EINVAL;
6744 		}
6745 	}
6746 	return res;
6747 }
6748 
6749 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6750 {
6751 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6752 			 zbc_model_strs_a[sdeb_zbc_model]);
6753 }
6754 static DRIVER_ATTR_RO(zbc);
6755 
6756 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6757 {
6758 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6759 }
6760 static DRIVER_ATTR_RO(tur_ms_to_ready);
6761 
6762 /* Note: the following array creates attribute files in the
6763  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6764  * files (over those found in the /sys/module/scsi_debug/parameters
6765  * directory) is that auxiliary actions can be triggered when an attribute
6766  * is changed. For example see: add_host_store() above.
6767  */
6768 
6769 static struct attribute *sdebug_drv_attrs[] = {
6770 	&driver_attr_delay.attr,
6771 	&driver_attr_opts.attr,
6772 	&driver_attr_ptype.attr,
6773 	&driver_attr_dsense.attr,
6774 	&driver_attr_fake_rw.attr,
6775 	&driver_attr_host_max_queue.attr,
6776 	&driver_attr_no_lun_0.attr,
6777 	&driver_attr_num_tgts.attr,
6778 	&driver_attr_dev_size_mb.attr,
6779 	&driver_attr_num_parts.attr,
6780 	&driver_attr_every_nth.attr,
6781 	&driver_attr_lun_format.attr,
6782 	&driver_attr_max_luns.attr,
6783 	&driver_attr_max_queue.attr,
6784 	&driver_attr_no_rwlock.attr,
6785 	&driver_attr_no_uld.attr,
6786 	&driver_attr_scsi_level.attr,
6787 	&driver_attr_virtual_gb.attr,
6788 	&driver_attr_add_host.attr,
6789 	&driver_attr_per_host_store.attr,
6790 	&driver_attr_vpd_use_hostno.attr,
6791 	&driver_attr_sector_size.attr,
6792 	&driver_attr_statistics.attr,
6793 	&driver_attr_submit_queues.attr,
6794 	&driver_attr_dix.attr,
6795 	&driver_attr_dif.attr,
6796 	&driver_attr_guard.attr,
6797 	&driver_attr_ato.attr,
6798 	&driver_attr_map.attr,
6799 	&driver_attr_random.attr,
6800 	&driver_attr_removable.attr,
6801 	&driver_attr_host_lock.attr,
6802 	&driver_attr_ndelay.attr,
6803 	&driver_attr_strict.attr,
6804 	&driver_attr_uuid_ctl.attr,
6805 	&driver_attr_cdb_len.attr,
6806 	&driver_attr_tur_ms_to_ready.attr,
6807 	&driver_attr_zbc.attr,
6808 	NULL,
6809 };
6810 ATTRIBUTE_GROUPS(sdebug_drv);
6811 
6812 static struct device *pseudo_primary;
6813 
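/*
 * Module initialization: validate module parameters, size the ramdisk and
 * its fake geometry, optionally create the first store, register the
 * pseudo bus and driver, then add the initial host(s).
 */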
6814 static int __init scsi_debug_init(void)
6815 {
6816 	bool want_store = (sdebug_fake_rw == 0);
6817 	unsigned long sz;
6818 	int k, ret, hosts_to_add;
6819 	int idx = -1;
6820 
6821 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6822 		pr_warn("ndelay must be less than 1 second, ignored\n");
6823 		sdebug_ndelay = 0;
6824 	} else if (sdebug_ndelay > 0)
6825 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6826 
6827 	switch (sdebug_sector_size) {
6828 	case  512:
6829 	case 1024:
6830 	case 2048:
6831 	case 4096:
6832 		break;
6833 	default:
6834 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6835 		return -EINVAL;
6836 	}
6837 
6838 	switch (sdebug_dif) {
6839 	case T10_PI_TYPE0_PROTECTION:
6840 		break;
6841 	case T10_PI_TYPE1_PROTECTION:
6842 	case T10_PI_TYPE2_PROTECTION:
6843 	case T10_PI_TYPE3_PROTECTION:
6844 		have_dif_prot = true;
6845 		break;
6846 
6847 	default:
6848 		pr_err("dif must be 0, 1, 2 or 3\n");
6849 		return -EINVAL;
6850 	}
6851 
6852 	if (sdebug_num_tgts < 0) {
6853 		pr_err("num_tgts must be >= 0\n");
6854 		return -EINVAL;
6855 	}
6856 
6857 	if (sdebug_guard > 1) {
6858 		pr_err("guard must be 0 or 1\n");
6859 		return -EINVAL;
6860 	}
6861 
6862 	if (sdebug_ato > 1) {
6863 		pr_err("ato must be 0 or 1\n");
6864 		return -EINVAL;
6865 	}
6866 
6867 	if (sdebug_physblk_exp > 15) {
6868 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6869 		return -EINVAL;
6870 	}
6871 
6872 	sdebug_lun_am = sdebug_lun_am_i;
6873 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6874 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6875 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6876 	}
6877 
6878 	if (sdebug_max_luns > 256) {
6879 		if (sdebug_max_luns > 16384) {
6880 			pr_warn("max_luns can be no more than 16384, use default\n");
6881 			sdebug_max_luns = DEF_MAX_LUNS;
6882 		}
6883 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6884 	}
6885 
6886 	if (sdebug_lowest_aligned > 0x3fff) {
6887 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6888 		return -EINVAL;
6889 	}
6890 
6891 	if (submit_queues < 1) {
6892 		pr_err("submit_queues must be 1 or more\n");
6893 		return -EINVAL;
6894 	}
6895 
6896 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6897 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6898 		return -EINVAL;
6899 	}
6900 
6901 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6902 	    (sdebug_host_max_queue < 0)) {
6903 		pr_err("host_max_queue must be in range [0 %d]\n",
6904 		       SDEBUG_CANQUEUE);
6905 		return -EINVAL;
6906 	}
6907 
6908 	if (sdebug_host_max_queue &&
6909 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6910 		sdebug_max_queue = sdebug_host_max_queue;
6911 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6912 			sdebug_max_queue);
6913 	}
6914 
6915 	/*
6916 	 * check for host managed zoned block device specified with
6917 	 * ptype=0x14 or zbc=XXX.
6918 	 */
6919 	if (sdebug_ptype == TYPE_ZBC) {
6920 		sdeb_zbc_model = BLK_ZONED_HM;
6921 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6922 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6923 		if (k < 0)
6924 			return k;
6925 		sdeb_zbc_model = k;
6926 		switch (sdeb_zbc_model) {
6927 		case BLK_ZONED_NONE:
6928 		case BLK_ZONED_HA:
6929 			sdebug_ptype = TYPE_DISK;
6930 			break;
6931 		case BLK_ZONED_HM:
6932 			sdebug_ptype = TYPE_ZBC;
6933 			break;
6934 		default:
6935 			pr_err("Invalid ZBC model\n");
6936 			return -EINVAL;
6937 		}
6938 	}
6939 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6940 		sdeb_zbc_in_use = true;
6941 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6942 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6943 	}
6944 
6945 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6946 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6947 	if (sdebug_dev_size_mb < 1)
6948 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6949 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6950 	sdebug_store_sectors = sz / sdebug_sector_size;
6951 	sdebug_capacity = get_sdebug_capacity();
6952 
6953 	/* play around with geometry, don't waste too much on track 0 */
6954 	sdebug_heads = 8;
6955 	sdebug_sectors_per = 32;
6956 	if (sdebug_dev_size_mb >= 256)
6957 		sdebug_heads = 64;
6958 	else if (sdebug_dev_size_mb >= 16)
6959 		sdebug_heads = 32;
6960 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6961 			       (sdebug_sectors_per * sdebug_heads);
6962 	if (sdebug_cylinders_per >= 1024) {
6963 		/* other LLDs do this; implies >= 1GB ram disk ... */
6964 		sdebug_heads = 255;
6965 		sdebug_sectors_per = 63;
6966 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6967 			       (sdebug_sectors_per * sdebug_heads);
6968 	}
6969 	if (scsi_debug_lbp()) {
6970 		sdebug_unmap_max_blocks =
6971 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6972 
6973 		sdebug_unmap_max_desc =
6974 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6975 
6976 		sdebug_unmap_granularity =
6977 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6978 
6979 		if (sdebug_unmap_alignment &&
6980 		    sdebug_unmap_granularity <=
6981 		    sdebug_unmap_alignment) {
6982 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6983 			return -EINVAL;
6984 		}
6985 	}
6986 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6987 	if (want_store) {
6988 		idx = sdebug_add_store();
6989 		if (idx < 0)
6990 			return idx;
6991 	}
6992 
6993 	pseudo_primary = root_device_register("pseudo_0");
6994 	if (IS_ERR(pseudo_primary)) {
6995 		pr_warn("root_device_register() error\n");
6996 		ret = PTR_ERR(pseudo_primary);
6997 		goto free_vm;
6998 	}
6999 	ret = bus_register(&pseudo_lld_bus);
7000 	if (ret < 0) {
7001 		pr_warn("bus_register error: %d\n", ret);
7002 		goto dev_unreg;
7003 	}
7004 	ret = driver_register(&sdebug_driverfs_driver);
7005 	if (ret < 0) {
7006 		pr_warn("driver_register error: %d\n", ret);
7007 		goto bus_unreg;
7008 	}
7009 
7010 	hosts_to_add = sdebug_add_host;
7011 	sdebug_add_host = 0;
7012 
7013 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7014 	if (!queued_cmd_cache) {
7015 		ret = -ENOMEM;
7016 		goto driver_unreg;
7017 	}
7018 
7019 	for (k = 0; k < hosts_to_add; k++) {
7020 		if (want_store && k == 0) {
7021 			ret = sdebug_add_host_helper(idx);
7022 			if (ret < 0) {
7023 				pr_err("add_host_helper k=%d, error=%d\n",
7024 				       k, -ret);
7025 				break;
7026 			}
7027 		} else {
7028 			ret = sdebug_do_add_host(want_store &&
7029 						 sdebug_per_host_store);
7030 			if (ret < 0) {
7031 				pr_err("add_host k=%d error=%d\n", k, -ret);
7032 				break;
7033 			}
7034 		}
7035 	}
7036 	if (sdebug_verbose)
7037 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7038 
7039 	return 0;
7040 
7041 driver_unreg:
7042 	driver_unregister(&sdebug_driverfs_driver);
7043 bus_unreg:
7044 	bus_unregister(&pseudo_lld_bus);
7045 dev_unreg:
7046 	root_device_unregister(pseudo_primary);
7047 free_vm:
7048 	sdebug_erase_store(idx, NULL);
7049 	return ret;
7050 }
7051 
7052 static void __exit scsi_debug_exit(void)
7053 {
7054 	int k = sdebug_num_hosts;
7055 
7056 	for (; k; k--)
7057 		sdebug_do_remove_host(true);
7058 	kmem_cache_destroy(queued_cmd_cache);
7059 	driver_unregister(&sdebug_driverfs_driver);
7060 	bus_unregister(&pseudo_lld_bus);
7061 	root_device_unregister(pseudo_primary);
7062 
7063 	sdebug_erase_all_stores(false);
7064 	xa_destroy(per_store_ap);
7065 }
7066 
7067 device_initcall(scsi_debug_init);
7068 module_exit(scsi_debug_exit);
7069 
7070 static void sdebug_release_adapter(struct device *dev)
7071 {
7072 	struct sdebug_host_info *sdbg_host;
7073 
7074 	sdbg_host = dev_to_sdebug_host(dev);
7075 	kfree(sdbg_host);
7076 }
7077 
7078 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7079 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7080 {
7081 	if (idx < 0)
7082 		return;
7083 	if (!sip) {
7084 		if (xa_empty(per_store_ap))
7085 			return;
7086 		sip = xa_load(per_store_ap, idx);
7087 		if (!sip)
7088 			return;
7089 	}
7090 	vfree(sip->map_storep);
7091 	vfree(sip->dif_storep);
7092 	vfree(sip->storep);
7093 	xa_erase(per_store_ap, idx);
7094 	kfree(sip);
7095 }
7096 
7097 /* Assume apart_from_first==false only in shutdown case. */
7098 static void sdebug_erase_all_stores(bool apart_from_first)
7099 {
7100 	unsigned long idx;
7101 	struct sdeb_store_info *sip = NULL;
7102 
7103 	xa_for_each(per_store_ap, idx, sip) {
7104 		if (apart_from_first)
7105 			apart_from_first = false;
7106 		else
7107 			sdebug_erase_store(idx, sip);
7108 	}
7109 	if (apart_from_first)
7110 		sdeb_most_recent_idx = sdeb_first_idx;
7111 }
7112 
7113 /*
7114  * Returns store xarray new element index (idx) if >=0 else negated errno.
7115  * Limit the number of stores to 65536.
7116  */
7117 static int sdebug_add_store(void)
7118 {
7119 	int res;
7120 	u32 n_idx;
7121 	unsigned long iflags;
7122 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7123 	struct sdeb_store_info *sip = NULL;
7124 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7125 
7126 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7127 	if (!sip)
7128 		return -ENOMEM;
7129 
7130 	xa_lock_irqsave(per_store_ap, iflags);
7131 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7132 	if (unlikely(res < 0)) {
7133 		xa_unlock_irqrestore(per_store_ap, iflags);
7134 		kfree(sip);
7135 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7136 		return res;
7137 	}
7138 	sdeb_most_recent_idx = n_idx;
7139 	if (sdeb_first_idx < 0)
7140 		sdeb_first_idx = n_idx;
7141 	xa_unlock_irqrestore(per_store_ap, iflags);
7142 
7143 	res = -ENOMEM;
7144 	sip->storep = vzalloc(sz);
7145 	if (!sip->storep) {
7146 		pr_err("user data oom\n");
7147 		goto err;
7148 	}
7149 	if (sdebug_num_parts > 0)
7150 		sdebug_build_parts(sip->storep, sz);
7151 
7152 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7153 	if (sdebug_dix) {
7154 		int dif_size;
7155 
7156 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7157 		sip->dif_storep = vmalloc(dif_size);
7158 
7159 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7160 			sip->dif_storep);
7161 
7162 		if (!sip->dif_storep) {
7163 			pr_err("DIX oom\n");
7164 			goto err;
7165 		}
7166 		memset(sip->dif_storep, 0xff, dif_size);
7167 	}
7168 	/* Logical Block Provisioning */
7169 	if (scsi_debug_lbp()) {
7170 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7171 		sip->map_storep = vmalloc(array_size(sizeof(long),
7172 						     BITS_TO_LONGS(map_size)));
7173 
7174 		pr_info("%lu provisioning blocks\n", map_size);
7175 
7176 		if (!sip->map_storep) {
7177 			pr_err("LBP map oom\n");
7178 			goto err;
7179 		}
7180 
7181 		bitmap_zero(sip->map_storep, map_size);
7182 
7183 		/* Map first 1KB for partition table */
7184 		if (sdebug_num_parts)
7185 			map_region(sip, 0, 2);
7186 	}
7187 
7188 	rwlock_init(&sip->macc_lck);
7189 	return (int)n_idx;
7190 err:
7191 	sdebug_erase_store((int)n_idx, sip);
7192 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7193 	return res;
7194 }
7195 
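/*
 * Allocate one pseudo adapter, bind it to store index per_host_idx (the
 * first store if negative), create num_tgts * max_luns device entries and
 * register the adapter device on the pseudo bus.
 */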
7196 static int sdebug_add_host_helper(int per_host_idx)
7197 {
7198 	int k, devs_per_host, idx;
7199 	int error = -ENOMEM;
7200 	struct sdebug_host_info *sdbg_host;
7201 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7202 
7203 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7204 	if (!sdbg_host)
7205 		return -ENOMEM;
7206 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7207 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7208 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7209 	sdbg_host->si_idx = idx;
7210 
7211 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7212 
7213 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7214 	for (k = 0; k < devs_per_host; k++) {
7215 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7216 		if (!sdbg_devinfo)
7217 			goto clean;
7218 	}
7219 
7220 	mutex_lock(&sdebug_host_list_mutex);
7221 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7222 	mutex_unlock(&sdebug_host_list_mutex);
7223 
7224 	sdbg_host->dev.bus = &pseudo_lld_bus;
7225 	sdbg_host->dev.parent = pseudo_primary;
7226 	sdbg_host->dev.release = &sdebug_release_adapter;
7227 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7228 
7229 	error = device_register(&sdbg_host->dev);
7230 	if (error) {
7231 		mutex_lock(&sdebug_host_list_mutex);
7232 		list_del(&sdbg_host->host_list);
7233 		mutex_unlock(&sdebug_host_list_mutex);
7234 		goto clean;
7235 	}
7236 
7237 	++sdebug_num_hosts;
7238 	return 0;
7239 
7240 clean:
7241 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7242 				 dev_list) {
7243 		list_del(&sdbg_devinfo->dev_list);
7244 		kfree(sdbg_devinfo->zstate);
7245 		kfree(sdbg_devinfo);
7246 	}
7247 	if (sdbg_host->dev.release)
7248 		put_device(&sdbg_host->dev);
7249 	else
7250 		kfree(sdbg_host);
7251 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7252 	return error;
7253 }
7254 
7255 static int sdebug_do_add_host(bool mk_new_store)
7256 {
7257 	int ph_idx = sdeb_most_recent_idx;
7258 
7259 	if (mk_new_store) {
7260 		ph_idx = sdebug_add_store();
7261 		if (ph_idx < 0)
7262 			return ph_idx;
7263 	}
7264 	return sdebug_add_host_helper(ph_idx);
7265 }
7266 
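/*
 * Remove the most recently added host. Unless called at module unload
 * (the_end), mark its store as not-in-use when no remaining host still
 * references it.
 */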
7267 static void sdebug_do_remove_host(bool the_end)
7268 {
7269 	int idx = -1;
7270 	struct sdebug_host_info *sdbg_host = NULL;
7271 	struct sdebug_host_info *sdbg_host2;
7272 
7273 	mutex_lock(&sdebug_host_list_mutex);
7274 	if (!list_empty(&sdebug_host_list)) {
7275 		sdbg_host = list_entry(sdebug_host_list.prev,
7276 				       struct sdebug_host_info, host_list);
7277 		idx = sdbg_host->si_idx;
7278 	}
7279 	if (!the_end && idx >= 0) {
7280 		bool unique = true;
7281 
7282 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7283 			if (sdbg_host2 == sdbg_host)
7284 				continue;
7285 			if (idx == sdbg_host2->si_idx) {
7286 				unique = false;
7287 				break;
7288 			}
7289 		}
7290 		if (unique) {
7291 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7292 			if (idx == sdeb_most_recent_idx)
7293 				--sdeb_most_recent_idx;
7294 		}
7295 	}
7296 	if (sdbg_host)
7297 		list_del(&sdbg_host->host_list);
7298 	mutex_unlock(&sdebug_host_list_mutex);
7299 
7300 	if (!sdbg_host)
7301 		return;
7302 
7303 	device_unregister(&sdbg_host->dev);
7304 	--sdebug_num_hosts;
7305 }
7306 
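/* .change_queue_depth handler: clamp the request to [1, SDEBUG_CANQUEUE] */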
7307 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7308 {
7309 	struct sdebug_dev_info *devip = sdev->hostdata;
7310 
7311 	if (!devip)
7312 		return	-ENODEV;
7313 
7314 	mutex_lock(&sdebug_host_list_mutex);
7315 	block_unblock_all_queues(true);
7316 
7317 	if (qdepth > SDEBUG_CANQUEUE) {
7318 		qdepth = SDEBUG_CANQUEUE;
7319 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7320 			qdepth, SDEBUG_CANQUEUE);
7321 	}
7322 	if (qdepth < 1)
7323 		qdepth = 1;
7324 	if (qdepth != sdev->queue_depth)
7325 		scsi_change_queue_depth(sdev, qdepth);
7326 
7327 	block_unblock_all_queues(false);
7328 	mutex_unlock(&sdebug_host_list_mutex);
7329 
7330 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7331 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7332 
7333 	return sdev->queue_depth;
7334 }
7335 
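/*
 * Decide whether to swallow this command to simulate a timeout when the
 * every_nth counter trips: any command with SDEBUG_OPT_TIMEOUT set, or
 * only medium-access commands with SDEBUG_OPT_MAC_TIMEOUT.
 */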
7336 static bool fake_timeout(struct scsi_cmnd *scp)
7337 {
7338 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7339 		if (sdebug_every_nth < -1)
7340 			sdebug_every_nth = -1;
7341 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7342 			return true; /* ignore command causing timeout */
7343 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7344 			 scsi_medium_access_command(scp))
7345 			return true; /* time out reads and writes */
7346 	}
7347 	return false;
7348 }
7349 
7350 /* Response to TUR or media access command when device stopped */
7351 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7352 {
7353 	int stopped_state;
7354 	u64 diff_ns = 0;
7355 	ktime_t now_ts = ktime_get_boottime();
7356 	struct scsi_device *sdp = scp->device;
7357 
7358 	stopped_state = atomic_read(&devip->stopped);
7359 	if (stopped_state == 2) {
7360 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7361 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7362 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7363 				/* tur_ms_to_ready timer extinguished */
7364 				atomic_set(&devip->stopped, 0);
7365 				return 0;
7366 			}
7367 		}
7368 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7369 		if (sdebug_verbose)
7370 			sdev_printk(KERN_INFO, sdp,
7371 				    "%s: Not ready: in process of becoming ready\n", my_name);
7372 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7373 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7374 
7375 			if (diff_ns <= tur_nanosecs_to_ready)
7376 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7377 			else
7378 				diff_ns = tur_nanosecs_to_ready;
7379 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7380 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7381 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7382 						   diff_ns);
7383 			return check_condition_result;
7384 		}
7385 	}
7386 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7387 	if (sdebug_verbose)
7388 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7389 			    my_name);
7390 	return check_condition_result;
7391 }
7392 
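/*
 * Split the submit_queues hardware queues between the default and poll
 * map types; poll_queues of them are reserved for polled completion.
 */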
7393 static void sdebug_map_queues(struct Scsi_Host *shost)
7394 {
7395 	int i, qoff;
7396 
7397 	if (shost->nr_hw_queues == 1)
7398 		return;
7399 
7400 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7401 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7402 
7403 		map->nr_queues  = 0;
7404 
7405 		if (i == HCTX_TYPE_DEFAULT)
7406 			map->nr_queues = submit_queues - poll_queues;
7407 		else if (i == HCTX_TYPE_POLL)
7408 			map->nr_queues = poll_queues;
7409 
7410 		if (!map->nr_queues) {
7411 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7412 			continue;
7413 		}
7414 
7415 		map->queue_offset = qoff;
7416 		blk_mq_map_queues(map);
7417 
7418 		qoff += map->nr_queues;
7419 	}
7420 }
7421 
7422 struct sdebug_blk_mq_poll_data {
7423 	unsigned int queue_num;
7424 	int *num_entries;
7425 };
7426 
7427 /*
7428  * We don't handle aborted commands here, but it does not seem possible to have
7429  * aborted polled commands from schedule_resp()
7430  */
7431 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7432 {
7433 	struct sdebug_blk_mq_poll_data *data = opaque;
7434 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7435 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7436 	struct sdebug_defer *sd_dp;
7437 	u32 unique_tag = blk_mq_unique_tag(rq);
7438 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7439 	struct sdebug_queued_cmd *sqcp;
7440 	unsigned long flags;
7441 	int queue_num = data->queue_num;
7442 	ktime_t time;
7443 
7444 	/* We're only interested in one queue for this iteration */
7445 	if (hwq != queue_num)
7446 		return true;
7447 
7448 	/* Subsequent checks would fail if this failed, but check anyway */
7449 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7450 		return true;
7451 
7452 	time = ktime_get_boottime();
7453 
7454 	spin_lock_irqsave(&sdsc->lock, flags);
7455 	sqcp = TO_QUEUED_CMD(cmd);
7456 	if (!sqcp) {
7457 		spin_unlock_irqrestore(&sdsc->lock, flags);
7458 		return true;
7459 	}
7460 
7461 	sd_dp = &sqcp->sd_dp;
7462 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7463 		spin_unlock_irqrestore(&sdsc->lock, flags);
7464 		return true;
7465 	}
7466 
7467 	if (time < sd_dp->cmpl_ts) {
7468 		spin_unlock_irqrestore(&sdsc->lock, flags);
7469 		return true;
7470 	}
7471 
7472 	ASSIGN_QUEUED_CMD(cmd, NULL);
7473 	spin_unlock_irqrestore(&sdsc->lock, flags);
7474 
7475 	if (sdebug_statistics) {
7476 		atomic_inc(&sdebug_completions);
7477 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7478 			atomic_inc(&sdebug_miss_cpus);
7479 	}
7480 
7481 	sdebug_free_queued_cmd(sqcp);
7482 
7483 	scsi_done(cmd); /* callback to mid level */
7484 	(*data->num_entries)++;
7485 	return true;
7486 }
7487 
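/*
 * mq_poll handler: walk the busy requests of this hardware queue and
 * complete those deferred as SDEB_DEFER_POLL whose completion time has
 * passed. Returns the number of commands completed.
 */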
7488 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7489 {
7490 	int num_entries = 0;
7491 	struct sdebug_blk_mq_poll_data data = {
7492 		.queue_num = queue_num,
7493 		.num_entries = &num_entries,
7494 	};
7495 
7496 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7497 				&data);
7498 
7499 	if (num_entries > 0)
7500 		atomic_add(num_entries, &sdeb_mq_poll_count);
7501 	return num_entries;
7502 }
7503 
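/*
 * Main queuecommand entry point: look up the opcode (and service action,
 * if any) in opcode_info_arr, run the optional strict CDB-mask, unit
 * attention and not-ready checks, then hand the matched resp_* function
 * to schedule_resp() with the configured delay.
 */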
7504 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7505 				   struct scsi_cmnd *scp)
7506 {
7507 	u8 sdeb_i;
7508 	struct scsi_device *sdp = scp->device;
7509 	const struct opcode_info_t *oip;
7510 	const struct opcode_info_t *r_oip;
7511 	struct sdebug_dev_info *devip;
7512 	u8 *cmd = scp->cmnd;
7513 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7514 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7515 	int k, na;
7516 	int errsts = 0;
7517 	u64 lun_index = sdp->lun & 0x3FFF;
7518 	u32 flags;
7519 	u16 sa;
7520 	u8 opcode = cmd[0];
7521 	bool has_wlun_rl;
7522 	bool inject_now;
7523 
7524 	scsi_set_resid(scp, 0);
7525 	if (sdebug_statistics) {
7526 		atomic_inc(&sdebug_cmnd_count);
7527 		inject_now = inject_on_this_cmd();
7528 	} else {
7529 		inject_now = false;
7530 	}
7531 	if (unlikely(sdebug_verbose &&
7532 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7533 		char b[120];
7534 		int n, len, sb;
7535 
7536 		len = scp->cmd_len;
7537 		sb = (int)sizeof(b);
7538 		if (len > 32)
7539 			strcpy(b, "too long, over 32 bytes");
7540 		else {
7541 			for (k = 0, n = 0; k < len && n < sb; ++k)
7542 				n += scnprintf(b + n, sb - n, "%02x ",
7543 					       (u32)cmd[k]);
7544 		}
7545 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7546 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7547 	}
7548 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7549 		return SCSI_MLQUEUE_HOST_BUSY;
7550 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7551 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7552 		goto err_out;
7553 
7554 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7555 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7556 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7557 	if (unlikely(!devip)) {
7558 		devip = find_build_dev_info(sdp);
7559 		if (NULL == devip)
7560 			goto err_out;
7561 	}
7562 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7563 		atomic_set(&sdeb_inject_pending, 1);
7564 
7565 	na = oip->num_attached;
7566 	r_pfp = oip->pfp;
7567 	if (na) {	/* multiple commands with this opcode */
7568 		r_oip = oip;
7569 		if (FF_SA & r_oip->flags) {
7570 			if (F_SA_LOW & oip->flags)
7571 				sa = 0x1f & cmd[1];
7572 			else
7573 				sa = get_unaligned_be16(cmd + 8);
7574 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7575 				if (opcode == oip->opcode && sa == oip->sa)
7576 					break;
7577 			}
7578 		} else {   /* since no service action only check opcode */
7579 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7580 				if (opcode == oip->opcode)
7581 					break;
7582 			}
7583 		}
7584 		if (k > na) {
7585 			if (F_SA_LOW & r_oip->flags)
7586 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7587 			else if (F_SA_HIGH & r_oip->flags)
7588 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7589 			else
7590 				mk_sense_invalid_opcode(scp);
7591 			goto check_cond;
7592 		}
7593 	}	/* else (when na==0) we assume the oip is a match */
7594 	flags = oip->flags;
7595 	if (unlikely(F_INV_OP & flags)) {
7596 		mk_sense_invalid_opcode(scp);
7597 		goto check_cond;
7598 	}
7599 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7600 		if (sdebug_verbose)
7601 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7602 				    my_name, opcode, " supported for wlun");
7603 		mk_sense_invalid_opcode(scp);
7604 		goto check_cond;
7605 	}
7606 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7607 		u8 rem;
7608 		int j;
7609 
7610 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7611 			rem = ~oip->len_mask[k] & cmd[k];
7612 			if (rem) {
7613 				for (j = 7; j >= 0; --j, rem <<= 1) {
7614 					if (0x80 & rem)
7615 						break;
7616 				}
7617 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7618 				goto check_cond;
7619 			}
7620 		}
7621 	}
7622 	if (unlikely(!(F_SKIP_UA & flags) &&
7623 		     find_first_bit(devip->uas_bm,
7624 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7625 		errsts = make_ua(scp, devip);
7626 		if (errsts)
7627 			goto check_cond;
7628 	}
7629 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7630 		     atomic_read(&devip->stopped))) {
7631 		errsts = resp_not_ready(scp, devip);
7632 		if (errsts)
7633 			goto fini;
7634 	}
7635 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7636 		goto fini;
7637 	if (unlikely(sdebug_every_nth)) {
7638 		if (fake_timeout(scp))
7639 			return 0;	/* ignore command: make trouble */
7640 	}
7641 	if (likely(oip->pfp))
7642 		pfp = oip->pfp;	/* calls a resp_* function */
7643 	else
7644 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7645 
7646 fini:
7647 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7648 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7649 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7650 					    sdebug_ndelay > 10000)) {
7651 		/*
7652 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7653 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7654 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7655 		 * For Synchronize Cache want 1/20 of SSU's delay.
7656 		 */
7657 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7658 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7659 
7660 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7661 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7662 	} else
7663 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7664 				     sdebug_ndelay);
7665 check_cond:
7666 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7667 err_out:
7668 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7669 }
7670 
7671 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7672 {
7673 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7674 
7675 	spin_lock_init(&sdsc->lock);
7676 
7677 	return 0;
7678 }
7679 
7680 
7681 static struct scsi_host_template sdebug_driver_template = {
7682 	.show_info =		scsi_debug_show_info,
7683 	.write_info =		scsi_debug_write_info,
7684 	.proc_name =		sdebug_proc_name,
7685 	.name =			"SCSI DEBUG",
7686 	.info =			scsi_debug_info,
7687 	.slave_alloc =		scsi_debug_slave_alloc,
7688 	.slave_configure =	scsi_debug_slave_configure,
7689 	.slave_destroy =	scsi_debug_slave_destroy,
7690 	.ioctl =		scsi_debug_ioctl,
7691 	.queuecommand =		scsi_debug_queuecommand,
7692 	.change_queue_depth =	sdebug_change_qdepth,
7693 	.map_queues =		sdebug_map_queues,
7694 	.mq_poll =		sdebug_blk_mq_poll,
7695 	.eh_abort_handler =	scsi_debug_abort,
7696 	.eh_device_reset_handler = scsi_debug_device_reset,
7697 	.eh_target_reset_handler = scsi_debug_target_reset,
7698 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7699 	.eh_host_reset_handler = scsi_debug_host_reset,
7700 	.can_queue =		SDEBUG_CANQUEUE,
7701 	.this_id =		7,
7702 	.sg_tablesize =		SG_MAX_SEGMENTS,
7703 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7704 	.max_sectors =		-1U,
7705 	.max_segment_size =	-1U,
7706 	.module =		THIS_MODULE,
7707 	.track_queue_depth =	1,
7708 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
7709 	.init_cmd_priv = sdebug_init_cmd_priv,
7710 };
7711 
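/*
 * Bus probe: allocate a Scsi_Host for this pseudo adapter, configure the
 * queue counts and DIF/DIX protection capabilities, then add and scan
 * the host.
 */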
7712 static int sdebug_driver_probe(struct device *dev)
7713 {
7714 	int error = 0;
7715 	struct sdebug_host_info *sdbg_host;
7716 	struct Scsi_Host *hpnt;
7717 	int hprot;
7718 
7719 	sdbg_host = dev_to_sdebug_host(dev);
7720 
7721 	sdebug_driver_template.can_queue = sdebug_max_queue;
7722 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7723 	sdebug_driver_template.max_segment_size = sdebug_max_segment_size;
7724 	if (!sdebug_clustering)
7725 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7726 
7727 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7728 	if (NULL == hpnt) {
7729 		pr_err("scsi_host_alloc failed\n");
7730 		error = -ENODEV;
7731 		return error;
7732 	}
7733 	if (submit_queues > nr_cpu_ids) {
7734 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7735 			my_name, submit_queues, nr_cpu_ids);
7736 		submit_queues = nr_cpu_ids;
7737 	}
7738 	/*
7739 	 * Decide whether to tell scsi subsystem that we want mq. The
7740 	 * following should give the same answer for each host.
7741 	 */
7742 	hpnt->nr_hw_queues = submit_queues;
7743 	if (sdebug_host_max_queue)
7744 		hpnt->host_tagset = 1;
7745 
7746 	/* poll queues are possible for nr_hw_queues > 1 */
7747 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7748 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7749 			 my_name, poll_queues, hpnt->nr_hw_queues);
7750 		poll_queues = 0;
7751 	}
7752 
7753 	/*
7754 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7755 	 * left over for non-polled I/O.
7756 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7757 	 */
7758 	if (poll_queues >= submit_queues) {
7759 		if (submit_queues < 3)
7760 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7761 		else
7762 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7763 				my_name, submit_queues - 1);
7764 		poll_queues = 1;
7765 	}
7766 	if (poll_queues)
7767 		hpnt->nr_maps = 3;
7768 
7769 	sdbg_host->shost = hpnt;
7770 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7771 		hpnt->max_id = sdebug_num_tgts + 1;
7772 	else
7773 		hpnt->max_id = sdebug_num_tgts;
7774 	/* = sdebug_max_luns; */
7775 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7776 
7777 	hprot = 0;
7778 
7779 	switch (sdebug_dif) {
7780 
7781 	case T10_PI_TYPE1_PROTECTION:
7782 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7783 		if (sdebug_dix)
7784 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7785 		break;
7786 
7787 	case T10_PI_TYPE2_PROTECTION:
7788 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7789 		if (sdebug_dix)
7790 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7791 		break;
7792 
7793 	case T10_PI_TYPE3_PROTECTION:
7794 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7795 		if (sdebug_dix)
7796 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7797 		break;
7798 
7799 	default:
7800 		if (sdebug_dix)
7801 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7802 		break;
7803 	}
7804 
7805 	scsi_host_set_prot(hpnt, hprot);
7806 
7807 	if (have_dif_prot || sdebug_dix)
7808 		pr_info("host protection%s%s%s%s%s%s%s\n",
7809 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7810 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7811 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7812 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7813 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7814 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7815 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7816 
7817 	if (sdebug_guard == 1)
7818 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7819 	else
7820 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7821 
7822 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7823 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7824 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7825 		sdebug_statistics = true;
7826 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7827 	if (error) {
7828 		pr_err("scsi_add_host failed\n");
7829 		error = -ENODEV;
7830 		scsi_host_put(hpnt);
7831 	} else {
7832 		scsi_scan_host(hpnt);
7833 	}
7834 
7835 	return error;
7836 }
7837 
7838 static void sdebug_driver_remove(struct device *dev)
7839 {
7840 	struct sdebug_host_info *sdbg_host;
7841 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7842 
7843 	sdbg_host = dev_to_sdebug_host(dev);
7844 
7845 	scsi_remove_host(sdbg_host->shost);
7846 
7847 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7848 				 dev_list) {
7849 		list_del(&sdbg_devinfo->dev_list);
7850 		kfree(sdbg_devinfo->zstate);
7851 		kfree(sdbg_devinfo);
7852 	}
7853 
7854 	scsi_host_put(sdbg_host->shost);
7855 }
7856 
7857 static struct bus_type pseudo_lld_bus = {
7858 	.name = "pseudo",
7859 	.probe = sdebug_driver_probe,
7860 	.remove = sdebug_driver_remove,
7861 	.drv_groups = sdebug_drv_groups,
7862 };
7863