1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
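/*
 * Load-time example (illustrative only; the parameter names mirror the
 * DEF_* defaults below and the matching sdebug_* module parameters):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *
 * builds one pseudo host with two targets of four LUNs each, backed by
 * a 256 MiB ram store.
 */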
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208 
209 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
210  * simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
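/*
 * Worked example: on a 64-bit build (BITS_PER_LONG == 64) the above
 * yields SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit
 * queue; on a 32-bit build it is 3 * 32 = 96.
 */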
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t num_in_q;
292 	atomic_t stopped;	/* 1: by SSU, 2: device start */
293 	bool used;
294 
295 	/* For ZBC devices */
296 	enum blk_zoned_model zmodel;
297 	unsigned int zcap;
298 	unsigned int zsize;
299 	unsigned int zsize_shift;
300 	unsigned int nr_zones;
301 	unsigned int nr_conv_zones;
302 	unsigned int nr_seq_zones;
303 	unsigned int nr_imp_open;
304 	unsigned int nr_exp_open;
305 	unsigned int nr_closed;
306 	unsigned int max_open;
307 	ktime_t create_ts;	/* time since bootup that this device was created */
308 	struct sdeb_zone_state *zstate;
309 };
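/*
 * Illustrative sketch only (a hypothetical helper, not driver code):
 * because zsize_shift implies power-of-2 sized zones, locating the
 * per-zone state for an LBA is one shift and an index into zstate[].
 */
#if 0	/* example, not compiled */
static struct sdeb_zone_state *zbc_zone_of(struct sdebug_dev_info *devip,
					   unsigned long long lba)
{
	return &devip->zstate[lba >> devip->zsize_shift];
}
#endif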
310 
311 struct sdebug_host_info {
312 	struct list_head host_list;
313 	int si_idx;	/* sdeb_store_info (per host) xarray index */
314 	struct Scsi_Host *shost;
315 	struct device dev;
316 	struct list_head dev_info_list;
317 };
318 
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 	rwlock_t macc_lck;	/* for atomic media access on this store */
322 	u8 *storep;		/* user data storage (ram) */
323 	struct t10_pi_tuple *dif_storep; /* protection info */
324 	void *map_storep;	/* provisioning map */
325 };
326 
327 #define to_sdebug_host(d)	\
328 	container_of(d, struct sdebug_host_info, dev)
329 
330 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
331 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
332 
333 struct sdebug_defer {
334 	struct hrtimer hrt;
335 	struct execute_work ew;
336 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
337 	int sqa_idx;	/* index of sdebug_queue array */
338 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
339 	int hc_idx;	/* hostwide tag index */
340 	int issuing_cpu;
341 	bool init_hrt;
342 	bool init_wq;
343 	bool init_poll;
344 	bool aborted;	/* true when blk_abort_request() already called */
345 	enum sdeb_defer_type defer_t;
346 };
347 
348 struct sdebug_queued_cmd {
349 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
350 	 * instance indicates this slot is in use.
351 	 */
352 	struct sdebug_defer *sd_dp;
353 	struct scsi_cmnd *a_cmnd;
354 };
355 
356 struct sdebug_queue {
357 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
359 	spinlock_t qc_lock;
360 	atomic_t blocked;	/* to temporarily stop more being queued */
361 };
362 
363 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
364 static atomic_t sdebug_completions;  /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending;
368 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
369 
370 struct opcode_info_t {
371 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
372 				/* for terminating element */
373 	u8 opcode;		/* if num_attached > 0, preferred */
374 	u16 sa;			/* service action */
375 	u32 flags;		/* OR-ed set of SDEB_F_* */
376 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
378 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
379 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
380 };
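/*
 * Illustrative sketch only (a hypothetical helper, not the driver's
 * actual strict-mode checker): len_mask[0] holds the cdb length and
 * each following byte flags which bits of that cdb byte may be set.
 */
#if 0	/* example, not compiled */
static bool sdeb_cdb_bits_ok(const struct opcode_info_t *oip, const u8 *cdb)
{
	int k;
	const int cdb_len = oip->len_mask[0];

	for (k = 1; k < cdb_len && k < 16; ++k) {
		if (cdb[k] & ~oip->len_mask[k])
			return false;	/* reserved bit set in cdb[k] */
	}
	return true;
}
#endif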
381 
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
384 	SDEB_I_INVALID_OPCODE =	0,
385 	SDEB_I_INQUIRY = 1,
386 	SDEB_I_REPORT_LUNS = 2,
387 	SDEB_I_REQUEST_SENSE = 3,
388 	SDEB_I_TEST_UNIT_READY = 4,
389 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
390 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
391 	SDEB_I_LOG_SENSE = 7,
392 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
393 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
394 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
395 	SDEB_I_START_STOP = 11,
396 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
397 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
398 	SDEB_I_MAINT_IN = 14,
399 	SDEB_I_MAINT_OUT = 15,
400 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
401 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
402 	SDEB_I_RESERVE = 18,		/* 6, 10 */
403 	SDEB_I_RELEASE = 19,		/* 6, 10 */
404 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
405 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
406 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
407 	SDEB_I_SEND_DIAG = 23,
408 	SDEB_I_UNMAP = 24,
409 	SDEB_I_WRITE_BUFFER = 25,
410 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
411 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
412 	SDEB_I_COMP_WRITE = 28,
413 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
414 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
415 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
416 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
417 };
418 
419 
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
423 	    0, 0, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
426 	    SDEB_I_RELEASE,
427 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 	    SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
438 	    SDEB_I_RELEASE,
439 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 	0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 	0, 0, 0, SDEB_I_VERIFY,
448 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 	     SDEB_I_MAINT_OUT, 0, 0, 0,
454 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 };
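/*
 * Worked example of the table above: READ(10) is opcode 0x28, so
 * opcode_ind_arr[0x28] == SDEB_I_READ; READ(6) at 0x08, READ(12) at
 * 0xa8 and READ(16) at 0x88 all map to that same index.
 */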
464 
465 /*
466  * The following "response" functions return the SCSI mid-level's 4 byte
467  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
468  * command completion, they can mask their return value with
469  * SDEG_RES_IMMED_MASK .
470  */
471 #define SDEG_RES_IMMED_MASK 0x40000000
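/*
 * Illustrative sketch only (a hypothetical handler; the IMMED bit
 * shown is the one START STOP UNIT uses, cdb byte 1 bit 0): a response
 * function reporting early completion ORs the mask into its return.
 */
#if 0	/* example, not compiled */
static int resp_example_immed(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	const bool immed = !!(scp->cmnd[1] & 0x1);
	int res = 0;	/* GOOD status */

	/* ... command-specific work would go here ... */
	return immed ? (res | SDEG_RES_IMMED_MASK) : res;
}
#endif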
472 
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
509 
510 /*
511  * The following are overflow arrays for cdbs that "hit" the same index in
512  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513  * should be placed in opcode_info_arr[], the others should be placed here.
514  */
515 static const struct opcode_info_t msense_iarr[] = {
516 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
517 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t mselect_iarr[] = {
521 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
522 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 };
524 
525 static const struct opcode_info_t read_iarr[] = {
526 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
528 	     0, 0, 0, 0} },
529 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 	     0xc7, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t write_iarr[] = {
537 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
538 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
539 		   0, 0, 0, 0, 0, 0} },
540 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
541 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
542 		   0, 0, 0} },
543 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
544 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 		   0xbf, 0xc7, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t verify_iarr[] = {
549 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 };
553 
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
558 };
559 
560 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
561 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
564 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
567 };
568 
569 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
570 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
576 };
577 
578 static const struct opcode_info_t write_same_iarr[] = {
579 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
582 };
583 
584 static const struct opcode_info_t reserve_iarr[] = {
585 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
586 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t release_iarr[] = {
590 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
591 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 };
593 
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
598 };
599 
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
604 };
605 
606 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
607 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
610 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
613 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
616 };
617 
618 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
619 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 };
623 
624 
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626  * plus the terminating elements for logic that scans this table such as
627  * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
631 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 	     0, 0} },					/* REPORT LUNS */
637 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
643 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
644 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
646 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
647 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
649 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650 	     0, 0, 0} },
651 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
652 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653 	     0, 0} },
654 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
656 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
660 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
671 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
673 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 				0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
680 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
684 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685 	     0xff, 0xff} },
686 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
688 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 	     0} },
690 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
692 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693 	     0} },
694 /* 20 */
695 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
702 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
709 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
711 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712 		 0, 0, 0, 0, 0} },
713 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 	    resp_sync_cache, sync_cache_iarr,
715 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
717 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
720 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 	    resp_pre_fetch, pre_fetch_iarr,
722 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
724 
725 /* 30 */
726 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
728 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
732 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
736 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
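/*
 * Illustrative sketch only (a hypothetical helper; service-action
 * matching via FF_SA is omitted): resolving a cdb is a two-stage
 * lookup. Byte 0 selects an SDEB_I_* index through opcode_ind_arr[];
 * when several cdbs share that index, the preferred one sits in
 * opcode_info_arr[] and the rest are scanned in the overflow array.
 */
#if 0	/* example, not compiled */
static const struct opcode_info_t *sdeb_lookup(const u8 *cdb)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[cdb[0]]];
	u8 k;

	if (oip->num_attached > 0 && oip->opcode != cdb[0]) {
		for (k = 0; k < oip->num_attached; ++k) {
			if (oip->arrp[k].opcode == cdb[0])
				return &oip->arrp[k];
		}
	}
	return oip;
}
#endif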
738 
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue;	/* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
755 static unsigned int sdebug_max_segment_size = BLK_MAX_SEGMENT_SIZE;
756 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
757 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
758 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
759 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
760 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
761 static int sdebug_no_uld;
762 static int sdebug_num_parts = DEF_NUM_PARTS;
763 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
764 static int sdebug_opt_blks = DEF_OPT_BLKS;
765 static int sdebug_opts = DEF_OPTS;
766 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
767 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
768 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
769 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
770 static int sdebug_sector_size = DEF_SECTOR_SIZE;
771 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
772 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
773 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
774 static unsigned int sdebug_lbpu = DEF_LBPU;
775 static unsigned int sdebug_lbpws = DEF_LBPWS;
776 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
777 static unsigned int sdebug_lbprz = DEF_LBPRZ;
778 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
779 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
780 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
781 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
782 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
783 static int sdebug_uuid_ctl = DEF_UUID_CTL;
784 static bool sdebug_random = DEF_RANDOM;
785 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
786 static bool sdebug_removable = DEF_REMOVABLE;
787 static bool sdebug_clustering;
788 static bool sdebug_host_lock = DEF_HOST_LOCK;
789 static bool sdebug_strict = DEF_STRICT;
790 static bool sdebug_any_injecting_opt;
791 static bool sdebug_no_rwlock;
792 static bool sdebug_verbose;
793 static bool have_dif_prot;
794 static bool write_since_sync;
795 static bool sdebug_statistics = DEF_STATISTICS;
796 static bool sdebug_wp;
797 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
798 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
799 static char *sdeb_zbc_model_s;
800 
801 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
802 			  SAM_LUN_AM_FLAT = 0x1,
803 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
804 			  SAM_LUN_AM_EXTENDED = 0x3};
805 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
806 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
807 
808 static unsigned int sdebug_store_sectors;
809 static sector_t sdebug_capacity;	/* in sectors */
810 
811 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
812    may still need them */
813 static int sdebug_heads;		/* heads per disk */
814 static int sdebug_cylinders_per;	/* cylinders per surface */
815 static int sdebug_sectors_per;		/* sectors per cylinder */
816 
817 static LIST_HEAD(sdebug_host_list);
818 static DEFINE_SPINLOCK(sdebug_host_list_lock);
819 
820 static struct xarray per_store_arr;
821 static struct xarray *per_store_ap = &per_store_arr;
822 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
823 static int sdeb_most_recent_idx = -1;
824 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
825 
826 static unsigned long map_size;
827 static int num_aborts;
828 static int num_dev_resets;
829 static int num_target_resets;
830 static int num_bus_resets;
831 static int num_host_resets;
832 static int dix_writes;
833 static int dix_reads;
834 static int dif_errors;
835 
836 /* ZBC global data */
837 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
838 static int sdeb_zbc_zone_cap_mb;
839 static int sdeb_zbc_zone_size_mb;
840 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
841 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
842 
843 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
844 static int poll_queues; /* io_uring iopoll interface. */
845 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
846 
847 static DEFINE_RWLOCK(atomic_rw);
848 static DEFINE_RWLOCK(atomic_rw2);
849 
850 static rwlock_t *ramdisk_lck_a[2];
851 
852 static char sdebug_proc_name[] = MY_NAME;
853 static const char *my_name = MY_NAME;
854 
855 static struct bus_type pseudo_lld_bus;
856 
857 static struct device_driver sdebug_driverfs_driver = {
858 	.name 		= sdebug_proc_name,
859 	.bus		= &pseudo_lld_bus,
860 };
861 
862 static const int check_condition_result =
863 	SAM_STAT_CHECK_CONDITION;
864 
865 static const int illegal_condition_result =
866 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
867 
868 static const int device_qfull_result =
869 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
870 
871 static const int condition_met_result = SAM_STAT_CONDITION_MET;
872 
873 
874 /* Only do the extra work involved in logical block provisioning if one or
875  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
876  * real reads and writes (i.e. not skipping them for speed).
877  */
878 static inline bool scsi_debug_lbp(void)
879 {
880 	return 0 == sdebug_fake_rw &&
881 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
882 }
883 
884 static void *lba2fake_store(struct sdeb_store_info *sip,
885 			    unsigned long long lba)
886 {
887 	struct sdeb_store_info *lsip = sip;
888 
889 	lba = do_div(lba, sdebug_store_sectors);
890 	if (!sip || !sip->storep) {
891 		WARN_ON_ONCE(true);
892 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
893 	}
894 	return lsip->storep + lba * sdebug_sector_size;
895 }
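/*
 * Worked example: with sdebug_store_sectors == 0x8000, an lba of
 * 0x12345 wraps to 0x2345, so the pointer returned above is
 * storep + 0x2345 * sdebug_sector_size.
 */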
896 
897 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
898 				      sector_t sector)
899 {
900 	sector = sector_div(sector, sdebug_store_sectors);
901 
902 	return sip->dif_storep + sector;
903 }
904 
905 static void sdebug_max_tgts_luns(void)
906 {
907 	struct sdebug_host_info *sdbg_host;
908 	struct Scsi_Host *hpnt;
909 
910 	spin_lock(&sdebug_host_list_lock);
911 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
912 		hpnt = sdbg_host->shost;
913 		if ((hpnt->this_id >= 0) &&
914 		    (sdebug_num_tgts > hpnt->this_id))
915 			hpnt->max_id = sdebug_num_tgts + 1;
916 		else
917 			hpnt->max_id = sdebug_num_tgts;
918 		/* sdebug_max_luns; */
919 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
920 	}
921 	spin_unlock(&sdebug_host_list_lock);
922 }
923 
924 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
925 
926 /* Set in_bit to -1 to indicate no bit position of invalid field */
927 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
928 				 enum sdeb_cmd_data c_d,
929 				 int in_byte, int in_bit)
930 {
931 	unsigned char *sbuff;
932 	u8 sks[4];
933 	int sl, asc;
934 
935 	sbuff = scp->sense_buffer;
936 	if (!sbuff) {
937 		sdev_printk(KERN_ERR, scp->device,
938 			    "%s: sense_buffer is NULL\n", __func__);
939 		return;
940 	}
941 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
942 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
943 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
944 	memset(sks, 0, sizeof(sks));
945 	sks[0] = 0x80;
946 	if (c_d)
947 		sks[0] |= 0x40;
948 	if (in_bit >= 0) {
949 		sks[0] |= 0x8;
950 		sks[0] |= 0x7 & in_bit;
951 	}
952 	put_unaligned_be16(in_byte, sks + 1);
953 	if (sdebug_dsense) {
954 		sl = sbuff[7] + 8;
955 		sbuff[7] = sl;
956 		sbuff[sl] = 0x2;
957 		sbuff[sl + 1] = 0x6;
958 		memcpy(sbuff + sl + 4, sks, 3);
959 	} else
960 		memcpy(sbuff + 15, sks, 3);
961 	if (sdebug_verbose)
962 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
963 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
964 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
965 }
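/*
 * Worked example: flagging cdb byte 2, bit 7 as invalid produces
 * sks[] = { 0xcf, 0x00, 0x02 } (0x80 SKSV | 0x40 C/D | 0x08 BPV |
 * bit position 7), landing at bytes 15..17 of fixed-format sense.
 */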
966 
967 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
968 {
969 	if (!scp->sense_buffer) {
970 		sdev_printk(KERN_ERR, scp->device,
971 			    "%s: sense_buffer is NULL\n", __func__);
972 		return;
973 	}
974 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
975 
976 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
977 
978 	if (sdebug_verbose)
979 		sdev_printk(KERN_INFO, scp->device,
980 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
981 			    my_name, key, asc, asq);
982 }
983 
984 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
985 {
986 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
987 }
988 
989 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
990 			    void __user *arg)
991 {
992 	if (sdebug_verbose) {
993 		if (0x1261 == cmd)
994 			sdev_printk(KERN_INFO, dev,
995 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
996 		else if (0x5331 == cmd)
997 			sdev_printk(KERN_INFO, dev,
998 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
999 				    __func__);
1000 		else
1001 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1002 				    __func__, cmd);
1003 	}
1004 	return -EINVAL;
1005 	/* return -ENOTTY; // correct return but upsets fdisk */
1006 }
1007 
1008 static void config_cdb_len(struct scsi_device *sdev)
1009 {
1010 	switch (sdebug_cdb_len) {
1011 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1012 		sdev->use_10_for_rw = false;
1013 		sdev->use_16_for_rw = false;
1014 		sdev->use_10_for_ms = false;
1015 		break;
1016 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1017 		sdev->use_10_for_rw = true;
1018 		sdev->use_16_for_rw = false;
1019 		sdev->use_10_for_ms = false;
1020 		break;
1021 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1022 		sdev->use_10_for_rw = true;
1023 		sdev->use_16_for_rw = false;
1024 		sdev->use_10_for_ms = true;
1025 		break;
1026 	case 16:
1027 		sdev->use_10_for_rw = false;
1028 		sdev->use_16_for_rw = true;
1029 		sdev->use_10_for_ms = true;
1030 		break;
1031 	case 32: /* No knobs to suggest this so same as 16 for now */
1032 		sdev->use_10_for_rw = false;
1033 		sdev->use_16_for_rw = true;
1034 		sdev->use_10_for_ms = true;
1035 		break;
1036 	default:
1037 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1038 			sdebug_cdb_len);
1039 		sdev->use_10_for_rw = true;
1040 		sdev->use_16_for_rw = false;
1041 		sdev->use_10_for_ms = false;
1042 		sdebug_cdb_len = 10;
1043 		break;
1044 	}
1045 }
1046 
1047 static void all_config_cdb_len(void)
1048 {
1049 	struct sdebug_host_info *sdbg_host;
1050 	struct Scsi_Host *shost;
1051 	struct scsi_device *sdev;
1052 
1053 	spin_lock(&sdebug_host_list_lock);
1054 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1055 		shost = sdbg_host->shost;
1056 		shost_for_each_device(sdev, shost) {
1057 			config_cdb_len(sdev);
1058 		}
1059 	}
1060 	spin_unlock(&sdebug_host_list_lock);
1061 }
1062 
1063 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1064 {
1065 	struct sdebug_host_info *sdhp;
1066 	struct sdebug_dev_info *dp;
1067 
1068 	spin_lock(&sdebug_host_list_lock);
1069 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1070 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1071 			if ((devip->sdbg_host == dp->sdbg_host) &&
1072 			    (devip->target == dp->target))
1073 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1074 		}
1075 	}
1076 	spin_unlock(&sdebug_host_list_lock);
1077 }
1078 
1079 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1080 {
1081 	int k;
1082 
1083 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1084 	if (k != SDEBUG_NUM_UAS) {
1085 		const char *cp = NULL;
1086 
1087 		switch (k) {
1088 		case SDEBUG_UA_POR:
1089 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1090 					POWER_ON_RESET_ASCQ);
1091 			if (sdebug_verbose)
1092 				cp = "power on reset";
1093 			break;
1094 		case SDEBUG_UA_POOCCUR:
1095 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1096 					POWER_ON_OCCURRED_ASCQ);
1097 			if (sdebug_verbose)
1098 				cp = "power on occurred";
1099 			break;
1100 		case SDEBUG_UA_BUS_RESET:
1101 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1102 					BUS_RESET_ASCQ);
1103 			if (sdebug_verbose)
1104 				cp = "bus reset";
1105 			break;
1106 		case SDEBUG_UA_MODE_CHANGED:
1107 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1108 					MODE_CHANGED_ASCQ);
1109 			if (sdebug_verbose)
1110 				cp = "mode parameters changed";
1111 			break;
1112 		case SDEBUG_UA_CAPACITY_CHANGED:
1113 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1114 					CAPACITY_CHANGED_ASCQ);
1115 			if (sdebug_verbose)
1116 				cp = "capacity data changed";
1117 			break;
1118 		case SDEBUG_UA_MICROCODE_CHANGED:
1119 			mk_sense_buffer(scp, UNIT_ATTENTION,
1120 					TARGET_CHANGED_ASC,
1121 					MICROCODE_CHANGED_ASCQ);
1122 			if (sdebug_verbose)
1123 				cp = "microcode has been changed";
1124 			break;
1125 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1126 			mk_sense_buffer(scp, UNIT_ATTENTION,
1127 					TARGET_CHANGED_ASC,
1128 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1129 			if (sdebug_verbose)
1130 				cp = "microcode has been changed without reset";
1131 			break;
1132 		case SDEBUG_UA_LUNS_CHANGED:
1133 			/*
1134 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1135 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1136 			 * on the target, until a REPORT LUNS command is
1137 			 * received.  SPC-4 behavior is to report it only once.
1138 			 * NOTE:  sdebug_scsi_level does not use the same
1139 			 * values as struct scsi_device->scsi_level.
1140 			 */
1141 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1142 				clear_luns_changed_on_target(devip);
1143 			mk_sense_buffer(scp, UNIT_ATTENTION,
1144 					TARGET_CHANGED_ASC,
1145 					LUNS_CHANGED_ASCQ);
1146 			if (sdebug_verbose)
1147 				cp = "reported luns data has changed";
1148 			break;
1149 		default:
1150 			pr_warn("unexpected unit attention code=%d\n", k);
1151 			if (sdebug_verbose)
1152 				cp = "unknown";
1153 			break;
1154 		}
1155 		clear_bit(k, devip->uas_bm);
1156 		if (sdebug_verbose)
1157 			sdev_printk(KERN_INFO, scp->device,
1158 				   "%s reports: Unit attention: %s\n",
1159 				   my_name, cp);
1160 		return check_condition_result;
1161 	}
1162 	return 0;
1163 }
1164 
1165 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1166 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1167 				int arr_len)
1168 {
1169 	int act_len;
1170 	struct scsi_data_buffer *sdb = &scp->sdb;
1171 
1172 	if (!sdb->length)
1173 		return 0;
1174 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1175 		return DID_ERROR << 16;
1176 
1177 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1178 				      arr, arr_len);
1179 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1180 
1181 	return 0;
1182 }
1183 
1184 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1185  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1186  * calls, not required to write in ascending offset order. Assumes resid
1187  * set to scsi_bufflen() prior to any calls.
1188  */
1189 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1190 				  int arr_len, unsigned int off_dst)
1191 {
1192 	unsigned int act_len, n;
1193 	struct scsi_data_buffer *sdb = &scp->sdb;
1194 	off_t skip = off_dst;
1195 
1196 	if (sdb->length <= off_dst)
1197 		return 0;
1198 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1199 		return DID_ERROR << 16;
1200 
1201 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1202 				       arr, arr_len, skip);
1203 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1204 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1205 		 scsi_get_resid(scp));
1206 	n = scsi_bufflen(scp) - (off_dst + act_len);
1207 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1208 	return 0;
1209 }
1210 
1211 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1212  * 'arr' or -1 if error.
1213  */
1214 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1215 			       int arr_len)
1216 {
1217 	if (!scsi_bufflen(scp))
1218 		return 0;
1219 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1220 		return -1;
1221 
1222 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1223 }
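/*
 * Illustrative sketch only (a hypothetical handler): a typical data-in
 * resp_*() builds its payload in a local array and hands it to
 * fill_from_dev_buffer(), which copies into the command's scatter-
 * gather list and sets the residual.
 */
#if 0	/* example, not compiled */
static int resp_example_data_in(struct scsi_cmnd *scp,
				struct sdebug_dev_info *devip)
{
	unsigned char arr[16] = { 0 };

	arr[0] = devip->lun & 0x1f;	/* hypothetical payload byte */
	return fill_from_dev_buffer(scp, arr, sizeof(arr));
}
#endif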
1224 
1225 
1226 static char sdebug_inq_vendor_id[9] = "Linux   ";
1227 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1228 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1229 /* Use some locally assigned NAAs for SAS addresses. */
1230 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1231 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1232 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1233 
1234 /* Device identification VPD page. Returns number of bytes placed in arr */
1235 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1236 			  int target_dev_id, int dev_id_num,
1237 			  const char *dev_id_str, int dev_id_str_len,
1238 			  const uuid_t *lu_name)
1239 {
1240 	int num, port_a;
1241 	char b[32];
1242 
1243 	port_a = target_dev_id + 1;
1244 	/* T10 vendor identifier field format (faked) */
1245 	arr[0] = 0x2;	/* ASCII */
1246 	arr[1] = 0x1;
1247 	arr[2] = 0x0;
1248 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1249 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1250 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1251 	num = 8 + 16 + dev_id_str_len;
1252 	arr[3] = num;
1253 	num += 4;
1254 	if (dev_id_num >= 0) {
1255 		if (sdebug_uuid_ctl) {
1256 			/* Locally assigned UUID */
1257 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1258 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1259 			arr[num++] = 0x0;
1260 			arr[num++] = 0x12;
1261 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1262 			arr[num++] = 0x0;
1263 			memcpy(arr + num, lu_name, 16);
1264 			num += 16;
1265 		} else {
1266 			/* NAA-3, Logical unit identifier (binary) */
1267 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1268 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1269 			arr[num++] = 0x0;
1270 			arr[num++] = 0x8;
1271 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1272 			num += 8;
1273 		}
1274 		/* Target relative port number */
1275 		arr[num++] = 0x61;	/* proto=sas, binary */
1276 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x4;	/* length */
1279 		arr[num++] = 0x0;	/* reserved */
1280 		arr[num++] = 0x0;	/* reserved */
1281 		arr[num++] = 0x0;
1282 		arr[num++] = 0x1;	/* relative port A */
1283 	}
1284 	/* NAA-3, Target port identifier */
1285 	arr[num++] = 0x61;	/* proto=sas, binary */
1286 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1287 	arr[num++] = 0x0;
1288 	arr[num++] = 0x8;
1289 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1290 	num += 8;
1291 	/* NAA-3, Target port group identifier */
1292 	arr[num++] = 0x61;	/* proto=sas, binary */
1293 	arr[num++] = 0x95;	/* piv=1, target port group id */
1294 	arr[num++] = 0x0;
1295 	arr[num++] = 0x4;
1296 	arr[num++] = 0;
1297 	arr[num++] = 0;
1298 	put_unaligned_be16(port_group_id, arr + num);
1299 	num += 2;
1300 	/* NAA-3, Target device identifier */
1301 	arr[num++] = 0x61;	/* proto=sas, binary */
1302 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1303 	arr[num++] = 0x0;
1304 	arr[num++] = 0x8;
1305 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1306 	num += 8;
1307 	/* SCSI name string: Target device identifier */
1308 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1309 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1310 	arr[num++] = 0x0;
1311 	arr[num++] = 24;
1312 	memcpy(arr + num, "naa.32222220", 12);
1313 	num += 12;
1314 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1315 	memcpy(arr + num, b, 8);
1316 	num += 8;
1317 	memset(arr + num, 0, 4);
1318 	num += 4;
1319 	return num;
1320 }
1321 
1322 static unsigned char vpd84_data[] = {
1323 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1324     0x22,0x22,0x22,0x0,0xbb,0x1,
1325     0x22,0x22,0x22,0x0,0xbb,0x2,
1326 };
1327 
1328 /*  Software interface identification VPD page */
1329 static int inquiry_vpd_84(unsigned char *arr)
1330 {
1331 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1332 	return sizeof(vpd84_data);
1333 }
1334 
1335 /* Management network addresses VPD page */
1336 static int inquiry_vpd_85(unsigned char *arr)
1337 {
1338 	int num = 0;
1339 	const char *na1 = "https://www.kernel.org/config";
1340 	const char *na2 = "http://www.kernel.org/log";
1341 	int plen, olen;
1342 
1343 	arr[num++] = 0x1;	/* lu, storage config */
1344 	arr[num++] = 0x0;	/* reserved */
1345 	arr[num++] = 0x0;
1346 	olen = strlen(na1);
1347 	plen = olen + 1;
1348 	if (plen % 4)
1349 		plen = ((plen / 4) + 1) * 4;
1350 	arr[num++] = plen;	/* length, null terminated, padded */
1351 	memcpy(arr + num, na1, olen);
1352 	memset(arr + num + olen, 0, plen - olen);
1353 	num += plen;
1354 
1355 	arr[num++] = 0x4;	/* lu, logging */
1356 	arr[num++] = 0x0;	/* reserved */
1357 	arr[num++] = 0x0;
1358 	olen = strlen(na2);
1359 	plen = olen + 1;
1360 	if (plen % 4)
1361 		plen = ((plen / 4) + 1) * 4;
1362 	arr[num++] = plen;	/* length, null terminated, padded */
1363 	memcpy(arr + num, na2, olen);
1364 	memset(arr + num + olen, 0, plen - olen);
1365 	num += plen;
1366 
1367 	return num;
1368 }
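/*
 * Worked example of the padding above: na1 is 29 characters, so plen
 * starts at 30 and is rounded up to 32, giving a null-padded, 4-byte
 * aligned network address field.
 */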
1369 
1370 /* SCSI ports VPD page */
1371 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1372 {
1373 	int num = 0;
1374 	int port_a, port_b;
1375 
1376 	port_a = target_dev_id + 1;
1377 	port_b = port_a + 1;
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;	/* reserved */
1380 	arr[num++] = 0x0;
1381 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1382 	memset(arr + num, 0, 6);
1383 	num += 6;
1384 	arr[num++] = 0x0;
1385 	arr[num++] = 12;	/* length tp descriptor */
1386 	/* naa-5 target port identifier (A) */
1387 	arr[num++] = 0x61;	/* proto=sas, binary */
1388 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1389 	arr[num++] = 0x0;	/* reserved */
1390 	arr[num++] = 0x8;	/* length */
1391 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1392 	num += 8;
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x0;	/* reserved */
1395 	arr[num++] = 0x0;
1396 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1397 	memset(arr + num, 0, 6);
1398 	num += 6;
1399 	arr[num++] = 0x0;
1400 	arr[num++] = 12;	/* length tp descriptor */
1401 	/* naa-5 target port identifier (B) */
1402 	arr[num++] = 0x61;	/* proto=sas, binary */
1403 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1404 	arr[num++] = 0x0;	/* reserved */
1405 	arr[num++] = 0x8;	/* length */
1406 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1407 	num += 8;
1408 
1409 	return num;
1410 }
1411 
1412 
1413 static unsigned char vpd89_data[] = {
1414 /* from 4th byte */ 0,0,0,0,
1415 'l','i','n','u','x',' ',' ',' ',
1416 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1417 '1','2','3','4',
1418 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1419 0xec,0,0,0,
1420 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1421 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1423 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1424 0x53,0x41,
1425 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0x20,0x20,
1427 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1428 0x10,0x80,
1429 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1430 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1431 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1433 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1434 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1435 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1440 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1441 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1442 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1455 };
1456 
1457 /* ATA Information VPD page */
1458 static int inquiry_vpd_89(unsigned char *arr)
1459 {
1460 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1461 	return sizeof(vpd89_data);
1462 }
1463 
1464 
1465 static unsigned char vpdb0_data[] = {
1466 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1469 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1470 };
1471 
1472 /* Block limits VPD page (SBC-3) */
1473 static int inquiry_vpd_b0(unsigned char *arr)
1474 {
1475 	unsigned int gran;
1476 
1477 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1478 
1479 	/* Optimal transfer length granularity */
1480 	if (sdebug_opt_xferlen_exp != 0 &&
1481 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1482 		gran = 1 << sdebug_opt_xferlen_exp;
1483 	else
1484 		gran = 1 << sdebug_physblk_exp;
1485 	put_unaligned_be16(gran, arr + 2);
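	/*
	 * Illustrative values (assumed, not defaults): physblk_exp = 3 with
	 * opt_xferlen_exp = 6 reports a granularity of 1 << 6 = 64 blocks.
	 */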
1486 
1487 	/* Maximum Transfer Length */
1488 	if (sdebug_store_sectors > 0x400)
1489 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1490 
1491 	/* Optimal Transfer Length */
1492 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1493 
1494 	if (sdebug_lbpu) {
1495 		/* Maximum Unmap LBA Count */
1496 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1497 
1498 		/* Maximum Unmap Block Descriptor Count */
1499 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1500 	}
1501 
1502 	/* Unmap Granularity Alignment */
1503 	if (sdebug_unmap_alignment) {
1504 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1505 		arr[28] |= 0x80; /* UGAVALID */
1506 	}
1507 
1508 	/* Optimal Unmap Granularity */
1509 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1510 
1511 	/* Maximum WRITE SAME Length */
1512 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1513 
1514 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1517 }
1518 
1519 /* Block device characteristics VPD page (SBC-3) */
1520 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1521 {
1522 	memset(arr, 0, 0x3c);
1523 	arr[0] = 0;
1524 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1525 	arr[2] = 0;
1526 	arr[3] = 5;	/* less than 1.8" */
1527 	if (devip->zmodel == BLK_ZONED_HA)
1528 		arr[4] = 1 << 4;	/* zoned field = 01b */
1529 
1530 	return 0x3c;
1531 }
1532 
1533 /* Logical block provisioning VPD page (SBC-4) */
1534 static int inquiry_vpd_b2(unsigned char *arr)
1535 {
1536 	memset(arr, 0, 0x4);
1537 	arr[0] = 0;			/* threshold exponent */
1538 	if (sdebug_lbpu)
1539 		arr[1] = 1 << 7;
1540 	if (sdebug_lbpws)
1541 		arr[1] |= 1 << 6;
1542 	if (sdebug_lbpws10)
1543 		arr[1] |= 1 << 5;
1544 	if (sdebug_lbprz && scsi_debug_lbp())
1545 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1546 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1547 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1548 	/* threshold_percentage=0 */
1549 	return 0x4;
1550 }
1551 
1552 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1553 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1554 {
1555 	memset(arr, 0, 0x3c);
1556 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1557 	/*
1558 	 * Set Optimal number of open sequential write preferred zones and
1559 	 * Optimal number of non-sequentially written sequential write
1560 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1561 	 * fields set to zero, apart from Max. number of open swrz_s field.
1562 	 */
1563 	put_unaligned_be32(0xffffffff, &arr[4]);
1564 	put_unaligned_be32(0xffffffff, &arr[8]);
1565 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1566 		put_unaligned_be32(devip->max_open, &arr[12]);
1567 	else
1568 		put_unaligned_be32(0xffffffff, &arr[12]);
1569 	if (devip->zcap < devip->zsize) {
1570 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1571 		put_unaligned_be64(devip->zsize, &arr[20]);
1572 	} else {
1573 		arr[19] = 0;
1574 	}
1575 	return 0x3c;
1576 }
1577 
1578 #define SDEBUG_LONG_INQ_SZ 96
1579 #define SDEBUG_MAX_INQ_ARR_SZ 584
1580 
1581 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1582 {
1583 	unsigned char pq_pdt;
1584 	unsigned char *arr;
1585 	unsigned char *cmd = scp->cmnd;
1586 	u32 alloc_len, n;
1587 	int ret;
1588 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1589 
1590 	alloc_len = get_unaligned_be16(cmd + 3);
1591 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1592 	if (!arr)
1593 		return DID_REQUEUE << 16;
1594 	is_disk = (sdebug_ptype == TYPE_DISK);
1595 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1596 	is_disk_zbc = (is_disk || is_zbc);
1597 	have_wlun = scsi_is_wlun(scp->device->lun);
1598 	if (have_wlun)
1599 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1600 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1601 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1602 	else
1603 		pq_pdt = (sdebug_ptype & 0x1f);
1604 	arr[0] = pq_pdt;
1605 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1606 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1607 		kfree(arr);
1608 		return check_condition_result;
1609 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1610 		int lu_id_num, port_group_id, target_dev_id;
1611 		u32 len;
1612 		char lu_id_str[6];
1613 		int host_no = devip->sdbg_host->shost->host_no;
1614 
1615 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1616 		    (devip->channel & 0x7f);
1617 		if (sdebug_vpd_use_hostno == 0)
1618 			host_no = 0;
1619 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1620 			    (devip->target * 1000) + devip->lun);
1621 		target_dev_id = ((host_no + 1) * 2000) +
1622 				 (devip->target * 1000) - 3;
1623 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1624 		if (0 == cmd[2]) { /* supported vital product data pages */
1625 			arr[1] = cmd[2];	/*sanity */
1626 			n = 4;
1627 			arr[n++] = 0x0;   /* this page */
1628 			arr[n++] = 0x80;  /* unit serial number */
1629 			arr[n++] = 0x83;  /* device identification */
1630 			arr[n++] = 0x84;  /* software interface ident. */
1631 			arr[n++] = 0x85;  /* management network addresses */
1632 			arr[n++] = 0x86;  /* extended inquiry */
1633 			arr[n++] = 0x87;  /* mode page policy */
1634 			arr[n++] = 0x88;  /* SCSI ports */
1635 			if (is_disk_zbc) {	  /* SBC or ZBC */
1636 				arr[n++] = 0x89;  /* ATA information */
1637 				arr[n++] = 0xb0;  /* Block limits */
1638 				arr[n++] = 0xb1;  /* Block characteristics */
1639 				if (is_disk)
1640 					arr[n++] = 0xb2;  /* LB Provisioning */
1641 				if (is_zbc)
1642 					arr[n++] = 0xb6;  /* ZB dev. char. */
1643 			}
1644 			arr[3] = n - 4;	  /* number of supported VPD pages */
1645 		} else if (0x80 == cmd[2]) { /* unit serial number */
1646 			arr[1] = cmd[2];	/*sanity */
1647 			arr[3] = len;
1648 			memcpy(&arr[4], lu_id_str, len);
1649 		} else if (0x83 == cmd[2]) { /* device identification */
1650 			arr[1] = cmd[2];	/*sanity */
1651 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1652 						target_dev_id, lu_id_num,
1653 						lu_id_str, len,
1654 						&devip->lu_name);
1655 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1656 			arr[1] = cmd[2];	/*sanity */
1657 			arr[3] = inquiry_vpd_84(&arr[4]);
1658 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = inquiry_vpd_85(&arr[4]);
1661 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1662 			arr[1] = cmd[2];	/*sanity */
1663 			arr[3] = 0x3c;	/* number of following entries */
1664 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1665 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1666 			else if (have_dif_prot)
1667 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1668 			else
1669 				arr[4] = 0x0;   /* no protection stuff */
1670 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1671 		} else if (0x87 == cmd[2]) { /* mode page policy */
1672 			arr[1] = cmd[2];	/*sanity */
1673 			arr[3] = 0x8;	/* number of following entries */
1674 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1675 			arr[6] = 0x80;	/* mlus, shared */
1676 			arr[8] = 0x18;	 /* protocol specific lu */
1677 			arr[10] = 0x82;	 /* mlus, per initiator port */
1678 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1679 			arr[1] = cmd[2];	/*sanity */
1680 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1681 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1682 			arr[1] = cmd[2];        /*sanity */
1683 			n = inquiry_vpd_89(&arr[4]);
1684 			put_unaligned_be16(n, arr + 2);
1685 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1686 			arr[1] = cmd[2];        /*sanity */
1687 			arr[3] = inquiry_vpd_b0(&arr[4]);
1688 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1689 			arr[1] = cmd[2];        /*sanity */
1690 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1691 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1692 			arr[1] = cmd[2];        /*sanity */
1693 			arr[3] = inquiry_vpd_b2(&arr[4]);
1694 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1695 			arr[1] = cmd[2];        /*sanity */
1696 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1697 		} else {
1698 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1699 			kfree(arr);
1700 			return check_condition_result;
1701 		}
1702 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1703 		ret = fill_from_dev_buffer(scp, arr,
1704 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1705 		kfree(arr);
1706 		return ret;
1707 	}
1708 	/* drops through here for a standard inquiry */
1709 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1710 	arr[2] = sdebug_scsi_level;
1711 	arr[3] = 2;    /* response_data_format==2 */
1712 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1713 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1714 	if (sdebug_vpd_use_hostno == 0)
1715 		arr[5] |= 0x10; /* claim: implicit TPGS */
1716 	arr[6] = 0x10; /* claim: MultiP */
1717 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1718 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1719 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1720 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1721 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1722 	/* Use Vendor Specific area to place driver date in ASCII hex */
1723 	memcpy(&arr[36], sdebug_version_date, 8);
1724 	/* version descriptors (2 bytes each) follow */
1725 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1726 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1727 	n = 62;
1728 	if (is_disk) {		/* SBC-4 no version claimed */
1729 		put_unaligned_be16(0x600, arr + n);
1730 		n += 2;
1731 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1732 		put_unaligned_be16(0x525, arr + n);
1733 		n += 2;
1734 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1735 		put_unaligned_be16(0x624, arr + n);
1736 		n += 2;
1737 	}
1738 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1739 	ret = fill_from_dev_buffer(scp, arr,
1740 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1741 	kfree(arr);
1742 	return ret;
1743 }
1744 
1745 /* See resp_iec_m_pg() for how this data is manipulated */
1746 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1747 				   0, 0, 0x0, 0x0};
1748 
1749 static int resp_requests(struct scsi_cmnd *scp,
1750 			 struct sdebug_dev_info *devip)
1751 {
1752 	unsigned char *cmd = scp->cmnd;
1753 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1754 	bool dsense = !!(cmd[1] & 1);
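	/*
	 * The DESC bit selects descriptor format sense data (response code
	 * 0x72) instead of fixed format (response code 0x70).
	 */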
1755 	u32 alloc_len = cmd[4];
1756 	u32 len = 18;
1757 	int stopped_state = atomic_read(&devip->stopped);
1758 
1759 	memset(arr, 0, sizeof(arr));
1760 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1761 		if (dsense) {
1762 			arr[0] = 0x72;
1763 			arr[1] = NOT_READY;
1764 			arr[2] = LOGICAL_UNIT_NOT_READY;
1765 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1766 			len = 8;
1767 		} else {
1768 			arr[0] = 0x70;
1769 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1770 			arr[7] = 0xa;			/* 18 byte sense buffer */
1771 			arr[12] = LOGICAL_UNIT_NOT_READY;
1772 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1773 		}
1774 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1775 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1776 		if (dsense) {
1777 			arr[0] = 0x72;
1778 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1779 			arr[2] = THRESHOLD_EXCEEDED;
1780 			arr[3] = 0xff;		/* Failure prediction(false) */
1781 			len = 8;
1782 		} else {
1783 			arr[0] = 0x70;
1784 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1785 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1786 			arr[12] = THRESHOLD_EXCEEDED;
1787 			arr[13] = 0xff;		/* Failure prediction(false) */
1788 		}
1789 	} else {	/* nothing to report */
1790 		if (dsense) {
1791 			len = 8;
1792 			memset(arr, 0, len);
1793 			arr[0] = 0x72;
1794 		} else {
1795 			memset(arr, 0, len);
1796 			arr[0] = 0x70;
1797 			arr[7] = 0xa;
1798 		}
1799 	}
1800 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1801 }
1802 
1803 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1804 {
1805 	unsigned char *cmd = scp->cmnd;
1806 	int power_cond, want_stop, stopped_state;
1807 	bool changing;
1808 
1809 	power_cond = (cmd[4] & 0xf0) >> 4;
1810 	if (power_cond) {
1811 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1812 		return check_condition_result;
1813 	}
1814 	want_stop = !(cmd[4] & 1);
1815 	stopped_state = atomic_read(&devip->stopped);
1816 	if (stopped_state == 2) {
1817 		ktime_t now_ts = ktime_get_boottime();
1818 
1819 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1820 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1821 
1822 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1823 				/* tur_ms_to_ready delay has expired */
1824 				atomic_set(&devip->stopped, 0);
1825 				stopped_state = 0;
1826 			}
1827 		}
1828 		if (stopped_state == 2) {
1829 			if (want_stop) {
1830 				stopped_state = 1;	/* dummy up success */
1831 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1832 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1833 				return check_condition_result;
1834 			}
1835 		}
1836 	}
1837 	changing = (stopped_state != want_stop);
1838 	if (changing)
1839 		atomic_xchg(&devip->stopped, want_stop);
1840 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1841 		return SDEG_RES_IMMED_MASK;
1842 	else
1843 		return 0;
1844 }
1845 
1846 static sector_t get_sdebug_capacity(void)
1847 {
1848 	static const unsigned int gibibyte = 1073741824;
1849 
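	/*
	 * Worked example (illustrative): sdebug_virtual_gb = 4 with
	 * sdebug_sector_size = 512 yields 4 * (1073741824 / 512) =
	 * 8388608 sectors.
	 */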
1850 	if (sdebug_virtual_gb > 0)
1851 		return (sector_t)sdebug_virtual_gb *
1852 			(gibibyte / sdebug_sector_size);
1853 	else
1854 		return sdebug_store_sectors;
1855 }
1856 
1857 #define SDEBUG_READCAP_ARR_SZ 8
1858 static int resp_readcap(struct scsi_cmnd *scp,
1859 			struct sdebug_dev_info *devip)
1860 {
1861 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1862 	unsigned int capac;
1863 
1864 	/* following just in case virtual_gb changed */
1865 	sdebug_capacity = get_sdebug_capacity();
1866 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1867 	if (sdebug_capacity < 0xffffffff) {
1868 		capac = (unsigned int)sdebug_capacity - 1;
1869 		put_unaligned_be32(capac, arr + 0);
1870 	} else
1871 		put_unaligned_be32(0xffffffff, arr + 0);
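	/* a max LBA of 0xffffffff directs the initiator to READ CAPACITY(16) */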
1872 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1873 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1874 }
1875 
1876 #define SDEBUG_READCAP16_ARR_SZ 32
1877 static int resp_readcap16(struct scsi_cmnd *scp,
1878 			  struct sdebug_dev_info *devip)
1879 {
1880 	unsigned char *cmd = scp->cmnd;
1881 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1882 	u32 alloc_len;
1883 
1884 	alloc_len = get_unaligned_be32(cmd + 10);
1885 	/* following just in case virtual_gb changed */
1886 	sdebug_capacity = get_sdebug_capacity();
1887 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1888 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1889 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1890 	arr[13] = sdebug_physblk_exp & 0xf;
1891 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1892 
1893 	if (scsi_debug_lbp()) {
1894 		arr[14] |= 0x80; /* LBPME */
1895 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1896 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1897 		 * in the wider field maps to 0 in this field.
1898 		 */
1899 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1900 			arr[14] |= 0x40;
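		/* so sdebug_lbprz values 1 and 3 set LBPRZ; 0 and 2 leave it clear */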
1901 	}
1902 
1903 	/*
1904 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1905 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1906 	 */
1907 	if (devip->zmodel == BLK_ZONED_HM)
1908 		arr[12] |= 1 << 4;
1909 
1910 	arr[15] = sdebug_lowest_aligned & 0xff;
1911 
1912 	if (have_dif_prot) {
1913 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1914 		arr[12] |= 1; /* PROT_EN */
1915 	}
1916 
1917 	return fill_from_dev_buffer(scp, arr,
1918 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1919 }
1920 
1921 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1922 
1923 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1924 			      struct sdebug_dev_info *devip)
1925 {
1926 	unsigned char *cmd = scp->cmnd;
1927 	unsigned char *arr;
1928 	int host_no = devip->sdbg_host->shost->host_no;
1929 	int port_group_a, port_group_b, port_a, port_b;
1930 	u32 alen, n, rlen;
1931 	int ret;
1932 
1933 	alen = get_unaligned_be32(cmd + 6);
1934 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1935 	if (!arr)
1936 		return DID_REQUEUE << 16;
1937 	/*
1938 	 * EVPD page 0x88 states we have two ports, one
1939 	 * real and a fake port with no device connected.
1940 	 * So we create two port groups with one port each
1941 	 * and set the group with port B to unavailable.
1942 	 */
1943 	port_a = 0x1; /* relative port A */
1944 	port_b = 0x2; /* relative port B */
1945 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1946 			(devip->channel & 0x7f);
1947 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1948 			(devip->channel & 0x7f) + 0x80;
1949 
1950 	/*
1951 	 * The asymmetric access state is cycled according to the host_id.
1952 	 */
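	/*
	 * host_no % 3 cycles through 0x0 (active/optimized), 0x1
	 * (active/non-optimized) and 0x2 (standby) asymmetric access states.
	 */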
1953 	n = 4;
1954 	if (sdebug_vpd_use_hostno == 0) {
1955 		arr[n++] = host_no % 3; /* Asymm access state */
1956 		arr[n++] = 0x0F; /* claim: all states are supported */
1957 	} else {
1958 		arr[n++] = 0x0; /* Active/Optimized path */
1959 		arr[n++] = 0x01; /* only support active/optimized paths */
1960 	}
1961 	put_unaligned_be16(port_group_a, arr + n);
1962 	n += 2;
1963 	arr[n++] = 0;    /* Reserved */
1964 	arr[n++] = 0;    /* Status code */
1965 	arr[n++] = 0;    /* Vendor unique */
1966 	arr[n++] = 0x1;  /* One port per group */
1967 	arr[n++] = 0;    /* Reserved */
1968 	arr[n++] = 0;    /* Reserved */
1969 	put_unaligned_be16(port_a, arr + n);
1970 	n += 2;
1971 	arr[n++] = 3;    /* Port unavailable */
1972 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1973 	put_unaligned_be16(port_group_b, arr + n);
1974 	n += 2;
1975 	arr[n++] = 0;    /* Reserved */
1976 	arr[n++] = 0;    /* Status code */
1977 	arr[n++] = 0;    /* Vendor unique */
1978 	arr[n++] = 0x1;  /* One port per group */
1979 	arr[n++] = 0;    /* Reserved */
1980 	arr[n++] = 0;    /* Reserved */
1981 	put_unaligned_be16(port_b, arr + n);
1982 	n += 2;
1983 
1984 	rlen = n - 4;
1985 	put_unaligned_be32(rlen, arr + 0);
1986 
1987 	/*
1988 	 * Return the smallest value of either
1989 	 * - The allocated length
1990 	 * - The constructed command length
1991 	 * - The maximum array size
1992 	 */
1993 	rlen = min(alen, n);
1994 	ret = fill_from_dev_buffer(scp, arr,
1995 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1996 	kfree(arr);
1997 	return ret;
1998 }
1999 
2000 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2001 			     struct sdebug_dev_info *devip)
2002 {
2003 	bool rctd;
2004 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2005 	u16 req_sa, u;
2006 	u32 alloc_len, a_len;
2007 	int k, offset, len, errsts, count, bump, na;
2008 	const struct opcode_info_t *oip;
2009 	const struct opcode_info_t *r_oip;
2010 	u8 *arr;
2011 	u8 *cmd = scp->cmnd;
2012 
2013 	rctd = !!(cmd[2] & 0x80);
2014 	reporting_opts = cmd[2] & 0x7;
2015 	req_opcode = cmd[3];
2016 	req_sa = get_unaligned_be16(cmd + 4);
2017 	alloc_len = get_unaligned_be32(cmd + 6);
2018 	if (alloc_len < 4 || alloc_len > 0xffff) {
2019 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2020 		return check_condition_result;
2021 	}
2022 	if (alloc_len > 8192)
2023 		a_len = 8192;
2024 	else
2025 		a_len = alloc_len;
2026 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2027 	if (NULL == arr) {
2028 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2029 				INSUFF_RES_ASCQ);
2030 		return check_condition_result;
2031 	}
2032 	switch (reporting_opts) {
2033 	case 0:	/* all commands */
2034 		/* count number of commands */
2035 		for (count = 0, oip = opcode_info_arr;
2036 		     oip->num_attached != 0xff; ++oip) {
2037 			if (F_INV_OP & oip->flags)
2038 				continue;
2039 			count += (oip->num_attached + 1);
2040 		}
2041 		bump = rctd ? 20 : 8;
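		/*
		 * Each descriptor is 8 bytes, growing to 20 bytes when the
		 * 12 byte command timeouts descriptor is appended (RCTD set).
		 */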
2042 		put_unaligned_be32(count * bump, arr);
2043 		for (offset = 4, oip = opcode_info_arr;
2044 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2045 			if (F_INV_OP & oip->flags)
2046 				continue;
2047 			na = oip->num_attached;
2048 			arr[offset] = oip->opcode;
2049 			put_unaligned_be16(oip->sa, arr + offset + 2);
2050 			if (rctd)
2051 				arr[offset + 5] |= 0x2;
2052 			if (FF_SA & oip->flags)
2053 				arr[offset + 5] |= 0x1;
2054 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2055 			if (rctd)
2056 				put_unaligned_be16(0xa, arr + offset + 8);
2057 			r_oip = oip;
2058 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2059 				if (F_INV_OP & oip->flags)
2060 					continue;
2061 				offset += bump;
2062 				arr[offset] = oip->opcode;
2063 				put_unaligned_be16(oip->sa, arr + offset + 2);
2064 				if (rctd)
2065 					arr[offset + 5] |= 0x2;
2066 				if (FF_SA & oip->flags)
2067 					arr[offset + 5] |= 0x1;
2068 				put_unaligned_be16(oip->len_mask[0],
2069 						   arr + offset + 6);
2070 				if (rctd)
2071 					put_unaligned_be16(0xa,
2072 							   arr + offset + 8);
2073 			}
2074 			oip = r_oip;
2075 			offset += bump;
2076 		}
2077 		break;
2078 	case 1:	/* one command: opcode only */
2079 	case 2:	/* one command: opcode plus service action */
2080 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2081 		sdeb_i = opcode_ind_arr[req_opcode];
2082 		oip = &opcode_info_arr[sdeb_i];
2083 		if (F_INV_OP & oip->flags) {
2084 			supp = 1;
2085 			offset = 4;
2086 		} else {
2087 			if (1 == reporting_opts) {
2088 				if (FF_SA & oip->flags) {
2089 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2090 							     2, 2);
2091 					kfree(arr);
2092 					return check_condition_result;
2093 				}
2094 				req_sa = 0;
2095 			} else if (2 == reporting_opts &&
2096 				   0 == (FF_SA & oip->flags)) {
2097 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2098 				kfree(arr);
2099 				return check_condition_result;
2100 			}
2101 			if (0 == (FF_SA & oip->flags) &&
2102 			    req_opcode == oip->opcode)
2103 				supp = 3;
2104 			else if (0 == (FF_SA & oip->flags)) {
2105 				na = oip->num_attached;
2106 				for (k = 0, oip = oip->arrp; k < na;
2107 				     ++k, ++oip) {
2108 					if (req_opcode == oip->opcode)
2109 						break;
2110 				}
2111 				supp = (k >= na) ? 1 : 3;
2112 			} else if (req_sa != oip->sa) {
2113 				na = oip->num_attached;
2114 				for (k = 0, oip = oip->arrp; k < na;
2115 				     ++k, ++oip) {
2116 					if (req_sa == oip->sa)
2117 						break;
2118 				}
2119 				supp = (k >= na) ? 1 : 3;
2120 			} else
2121 				supp = 3;
2122 			if (3 == supp) {
2123 				u = oip->len_mask[0];
2124 				put_unaligned_be16(u, arr + 2);
2125 				arr[4] = oip->opcode;
2126 				for (k = 1; k < u; ++k)
2127 					arr[4 + k] = (k < 16) ?
2128 						 oip->len_mask[k] : 0xff;
2129 				offset = 4 + u;
2130 			} else
2131 				offset = 4;
2132 		}
2133 		arr[1] = (rctd ? 0x80 : 0) | supp;
2134 		if (rctd) {
2135 			put_unaligned_be16(0xa, arr + offset);
2136 			offset += 12;
2137 		}
2138 		break;
2139 	default:
2140 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2141 		kfree(arr);
2142 		return check_condition_result;
2143 	}
2144 	offset = (offset < a_len) ? offset : a_len;
2145 	len = (offset < alloc_len) ? offset : alloc_len;
2146 	errsts = fill_from_dev_buffer(scp, arr, len);
2147 	kfree(arr);
2148 	return errsts;
2149 }
2150 
2151 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2152 			  struct sdebug_dev_info *devip)
2153 {
2154 	bool repd;
2155 	u32 alloc_len, len;
2156 	u8 arr[16];
2157 	u8 *cmd = scp->cmnd;
2158 
2159 	memset(arr, 0, sizeof(arr));
2160 	repd = !!(cmd[2] & 0x80);
2161 	alloc_len = get_unaligned_be32(cmd + 6);
2162 	if (alloc_len < 4) {
2163 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2164 		return check_condition_result;
2165 	}
2166 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2167 	arr[1] = 0x1;		/* ITNRS */
2168 	if (repd) {
2169 		arr[3] = 0xc;
2170 		len = 16;
2171 	} else
2172 		len = 4;
2173 
2174 	len = (len < alloc_len) ? len : alloc_len;
2175 	return fill_from_dev_buffer(scp, arr, len);
2176 }
2177 
2178 /* <<Following mode page info copied from ST318451LW>> */
2179 
2180 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2181 {	/* Read-Write Error Recovery page for mode_sense */
2182 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2183 					5, 0, 0xff, 0xff};
2184 
2185 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2186 	if (1 == pcontrol)
2187 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2188 	return sizeof(err_recov_pg);
2189 }
2190 
2191 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2192 { 	/* Disconnect-Reconnect page for mode_sense */
2193 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2194 					 0, 0, 0, 0, 0, 0, 0, 0};
2195 
2196 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2197 	if (1 == pcontrol)
2198 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2199 	return sizeof(disconnect_pg);
2200 }
2201 
2202 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2203 {       /* Format device page for mode_sense */
2204 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2205 				     0, 0, 0, 0, 0, 0, 0, 0,
2206 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2207 
2208 	memcpy(p, format_pg, sizeof(format_pg));
2209 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2210 	put_unaligned_be16(sdebug_sector_size, p + 12);
2211 	if (sdebug_removable)
2212 		p[20] |= 0x20; /* should agree with INQUIRY */
2213 	if (1 == pcontrol)
2214 		memset(p + 2, 0, sizeof(format_pg) - 2);
2215 	return sizeof(format_pg);
2216 }
2217 
2218 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2220 				     0, 0, 0, 0};
2221 
2222 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2223 { 	/* Caching page for mode_sense */
2224 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2225 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2226 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2227 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2228 
2229 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2230 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2231 	memcpy(p, caching_pg, sizeof(caching_pg));
2232 	if (1 == pcontrol)
2233 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2234 	else if (2 == pcontrol)
2235 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2236 	return sizeof(caching_pg);
2237 }
2238 
2239 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2240 				    0, 0, 0x2, 0x4b};
2241 
2242 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2243 { 	/* Control mode page for mode_sense */
2244 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2245 					0, 0, 0, 0};
2246 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2247 				     0, 0, 0x2, 0x4b};
2248 
2249 	if (sdebug_dsense)
2250 		ctrl_m_pg[2] |= 0x4;
2251 	else
2252 		ctrl_m_pg[2] &= ~0x4;
2253 
2254 	if (sdebug_ato)
2255 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2256 
2257 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2258 	if (1 == pcontrol)
2259 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2260 	else if (2 == pcontrol)
2261 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2262 	return sizeof(ctrl_m_pg);
2263 }
2264 
2265 
2266 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2267 {	/* Informational Exceptions control mode page for mode_sense */
2268 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2269 				       0, 0, 0x0, 0x0};
2270 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2271 				      0, 0, 0x0, 0x0};
2272 
2273 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2274 	if (1 == pcontrol)
2275 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2276 	else if (2 == pcontrol)
2277 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2278 	return sizeof(iec_m_pg);
2279 }
2280 
2281 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2282 {	/* SAS SSP mode page - short format for mode_sense */
2283 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2284 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2285 
2286 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2287 	if (1 == pcontrol)
2288 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2289 	return sizeof(sas_sf_m_pg);
2290 }
2291 
2292 
2293 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2294 			      int target_dev_id)
2295 {	/* SAS phy control and discover mode page for mode_sense */
2296 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2297 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2298 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2299 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2300 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2301 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2302 		    0, 0, 0, 0, 0, 0, 0, 0,
2303 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2304 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2305 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2306 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2307 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2308 		    0, 0, 0, 0, 0, 0, 0, 0,
2309 		};
2310 	int port_a, port_b;
2311 
2312 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2313 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2314 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2315 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2316 	port_a = target_dev_id + 1;
2317 	port_b = port_a + 1;
2318 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2319 	put_unaligned_be32(port_a, p + 20);
2320 	put_unaligned_be32(port_b, p + 48 + 20);
2321 	if (1 == pcontrol)
2322 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2323 	return sizeof(sas_pcd_m_pg);
2324 }
2325 
2326 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2327 {	/* SAS SSP shared protocol specific port mode subpage */
2328 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2329 		    0, 0, 0, 0, 0, 0, 0, 0,
2330 		};
2331 
2332 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2333 	if (1 == pcontrol)
2334 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2335 	return sizeof(sas_sha_m_pg);
2336 }
2337 
2338 #define SDEBUG_MAX_MSENSE_SZ 256
2339 
2340 static int resp_mode_sense(struct scsi_cmnd *scp,
2341 			   struct sdebug_dev_info *devip)
2342 {
2343 	int pcontrol, pcode, subpcode, bd_len;
2344 	unsigned char dev_spec;
2345 	u32 alloc_len, offset, len;
2346 	int target_dev_id;
2347 	int target = scp->device->id;
2348 	unsigned char *ap;
2349 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2350 	unsigned char *cmd = scp->cmnd;
2351 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2352 
2353 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2354 	pcontrol = (cmd[2] & 0xc0) >> 6;
2355 	pcode = cmd[2] & 0x3f;
2356 	subpcode = cmd[3];
2357 	msense_6 = (MODE_SENSE == cmd[0]);
2358 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2359 	is_disk = (sdebug_ptype == TYPE_DISK);
2360 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2361 	if ((is_disk || is_zbc) && !dbd)
2362 		bd_len = llbaa ? 16 : 8;
2363 	else
2364 		bd_len = 0;
2365 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2366 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2367 	if (0x3 == pcontrol) {  /* Saving values not supported */
2368 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2369 		return check_condition_result;
2370 	}
2371 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2372 			(devip->target * 1000) - 3;
2373 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2374 	if (is_disk || is_zbc) {
2375 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2376 		if (sdebug_wp)
2377 			dev_spec |= 0x80;
2378 	} else
2379 		dev_spec = 0x0;
2380 	if (msense_6) {
2381 		arr[2] = dev_spec;
2382 		arr[3] = bd_len;
2383 		offset = 4;
2384 	} else {
2385 		arr[3] = dev_spec;
2386 		if (16 == bd_len)
2387 			arr[4] = 0x1;	/* set LONGLBA bit */
2388 		arr[7] = bd_len;	/* assume 255 or less */
2389 		offset = 8;
2390 	}
2391 	ap = arr + offset;
2392 	if ((bd_len > 0) && (!sdebug_capacity))
2393 		sdebug_capacity = get_sdebug_capacity();
2394 
2395 	if (8 == bd_len) {
2396 		if (sdebug_capacity > 0xfffffffe)
2397 			put_unaligned_be32(0xffffffff, ap + 0);
2398 		else
2399 			put_unaligned_be32(sdebug_capacity, ap + 0);
2400 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2401 		offset += bd_len;
2402 		ap = arr + offset;
2403 	} else if (16 == bd_len) {
2404 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2405 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2406 		offset += bd_len;
2407 		ap = arr + offset;
2408 	}
2409 
2410 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2411 		/* TODO: Control Extension page */
2412 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2413 		return check_condition_result;
2414 	}
2415 	bad_pcode = false;
2416 
2417 	switch (pcode) {
2418 	case 0x1:	/* Read-Write error recovery page, direct access */
2419 		len = resp_err_recov_pg(ap, pcontrol, target);
2420 		offset += len;
2421 		break;
2422 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2423 		len = resp_disconnect_pg(ap, pcontrol, target);
2424 		offset += len;
2425 		break;
2426 	case 0x3:       /* Format device page, direct access */
2427 		if (is_disk) {
2428 			len = resp_format_pg(ap, pcontrol, target);
2429 			offset += len;
2430 		} else
2431 			bad_pcode = true;
2432 		break;
2433 	case 0x8:	/* Caching page, direct access */
2434 		if (is_disk || is_zbc) {
2435 			len = resp_caching_pg(ap, pcontrol, target);
2436 			offset += len;
2437 		} else
2438 			bad_pcode = true;
2439 		break;
2440 	case 0xa:	/* Control Mode page, all devices */
2441 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2442 		offset += len;
2443 		break;
2444 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2445 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2446 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2447 			return check_condition_result;
2448 		}
2449 		len = 0;
2450 		if ((0x0 == subpcode) || (0xff == subpcode))
2451 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2452 		if ((0x1 == subpcode) || (0xff == subpcode))
2453 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2454 						  target_dev_id);
2455 		if ((0x2 == subpcode) || (0xff == subpcode))
2456 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2457 		offset += len;
2458 		break;
2459 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2460 		len = resp_iec_m_pg(ap, pcontrol, target);
2461 		offset += len;
2462 		break;
2463 	case 0x3f:	/* Read all Mode pages */
2464 		if ((0 == subpcode) || (0xff == subpcode)) {
2465 			len = resp_err_recov_pg(ap, pcontrol, target);
2466 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2467 			if (is_disk) {
2468 				len += resp_format_pg(ap + len, pcontrol,
2469 						      target);
2470 				len += resp_caching_pg(ap + len, pcontrol,
2471 						       target);
2472 			} else if (is_zbc) {
2473 				len += resp_caching_pg(ap + len, pcontrol,
2474 						       target);
2475 			}
2476 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2477 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2478 			if (0xff == subpcode) {
2479 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2480 						  target, target_dev_id);
2481 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2482 			}
2483 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2484 			offset += len;
2485 		} else {
2486 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2487 			return check_condition_result;
2488 		}
2489 		break;
2490 	default:
2491 		bad_pcode = true;
2492 		break;
2493 	}
2494 	if (bad_pcode) {
2495 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2496 		return check_condition_result;
2497 	}
2498 	if (msense_6)
2499 		arr[0] = offset - 1;
2500 	else
2501 		put_unaligned_be16((offset - 2), arr + 0);
2502 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2503 }
2504 
2505 #define SDEBUG_MAX_MSELECT_SZ 512
2506 
2507 static int resp_mode_select(struct scsi_cmnd *scp,
2508 			    struct sdebug_dev_info *devip)
2509 {
2510 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2511 	int param_len, res, mpage;
2512 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2513 	unsigned char *cmd = scp->cmnd;
2514 	int mselect6 = (MODE_SELECT == cmd[0]);
2515 
2516 	memset(arr, 0, sizeof(arr));
2517 	pf = cmd[1] & 0x10;
2518 	sp = cmd[1] & 0x1;
2519 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2520 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2521 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2522 		return check_condition_result;
2523 	}
2524 	res = fetch_to_dev_buffer(scp, arr, param_len);
2525 	if (-1 == res)
2526 		return DID_ERROR << 16;
2527 	else if (sdebug_verbose && (res < param_len))
2528 		sdev_printk(KERN_INFO, scp->device,
2529 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2530 			    __func__, param_len, res);
2531 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2532 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2533 	off = bd_len + (mselect6 ? 4 : 8);
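	/*
	 * off: offset of the first mode page, past the 4 byte (MODE
	 * SELECT(6)) or 8 byte (MODE SELECT(10)) header plus any block
	 * descriptors.
	 */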
2534 	if (md_len > 2 || off >= res) {
2535 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2536 		return check_condition_result;
2537 	}
2538 	mpage = arr[off] & 0x3f;
2539 	ps = !!(arr[off] & 0x80);
2540 	if (ps) {
2541 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2542 		return check_condition_result;
2543 	}
2544 	spf = !!(arr[off] & 0x40);
2545 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2546 		       (arr[off + 1] + 2);
2547 	if ((pg_len + off) > param_len) {
2548 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2549 				PARAMETER_LIST_LENGTH_ERR, 0);
2550 		return check_condition_result;
2551 	}
2552 	switch (mpage) {
2553 	case 0x8:      /* Caching Mode page */
2554 		if (caching_pg[1] == arr[off + 1]) {
2555 			memcpy(caching_pg + 2, arr + off + 2,
2556 			       sizeof(caching_pg) - 2);
2557 			goto set_mode_changed_ua;
2558 		}
2559 		break;
2560 	case 0xa:      /* Control Mode page */
2561 		if (ctrl_m_pg[1] == arr[off + 1]) {
2562 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2563 			       sizeof(ctrl_m_pg) - 2);
2564 			if (ctrl_m_pg[4] & 0x8)
2565 				sdebug_wp = true;
2566 			else
2567 				sdebug_wp = false;
2568 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2569 			goto set_mode_changed_ua;
2570 		}
2571 		break;
2572 	case 0x1c:      /* Informational Exceptions Mode page */
2573 		if (iec_m_pg[1] == arr[off + 1]) {
2574 			memcpy(iec_m_pg + 2, arr + off + 2,
2575 			       sizeof(iec_m_pg) - 2);
2576 			goto set_mode_changed_ua;
2577 		}
2578 		break;
2579 	default:
2580 		break;
2581 	}
2582 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2583 	return check_condition_result;
2584 set_mode_changed_ua:
2585 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2586 	return 0;
2587 }
2588 
2589 static int resp_temp_l_pg(unsigned char *arr)
2590 {
2591 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2592 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2593 		};
2594 
2595 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2596 	return sizeof(temp_l_pg);
2597 }
2598 
2599 static int resp_ie_l_pg(unsigned char *arr)
2600 {
2601 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2602 		};
2603 
2604 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2605 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2606 		arr[4] = THRESHOLD_EXCEEDED;
2607 		arr[5] = 0xff;
2608 	}
2609 	return sizeof(ie_l_pg);
2610 }
2611 
2612 static int resp_env_rep_l_spg(unsigned char *arr)
2613 {
2614 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2615 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2616 					 0x1, 0x0, 0x23, 0x8,
2617 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2618 		};
2619 
2620 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2621 	return sizeof(env_rep_l_spg);
2622 }
2623 
2624 #define SDEBUG_MAX_LSENSE_SZ 512
2625 
2626 static int resp_log_sense(struct scsi_cmnd *scp,
2627 			  struct sdebug_dev_info *devip)
2628 {
2629 	int ppc, sp, pcode, subpcode;
2630 	u32 alloc_len, len, n;
2631 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2632 	unsigned char *cmd = scp->cmnd;
2633 
2634 	memset(arr, 0, sizeof(arr));
2635 	ppc = cmd[1] & 0x2;
2636 	sp = cmd[1] & 0x1;
2637 	if (ppc || sp) {
2638 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2639 		return check_condition_result;
2640 	}
2641 	pcode = cmd[2] & 0x3f;
2642 	subpcode = cmd[3] & 0xff;
2643 	alloc_len = get_unaligned_be16(cmd + 7);
2644 	arr[0] = pcode;
2645 	if (0 == subpcode) {
2646 		switch (pcode) {
2647 		case 0x0:	/* Supported log pages log page */
2648 			n = 4;
2649 			arr[n++] = 0x0;		/* this page */
2650 			arr[n++] = 0xd;		/* Temperature */
2651 			arr[n++] = 0x2f;	/* Informational exceptions */
2652 			arr[3] = n - 4;
2653 			break;
2654 		case 0xd:	/* Temperature log page */
2655 			arr[3] = resp_temp_l_pg(arr + 4);
2656 			break;
2657 		case 0x2f:	/* Informational exceptions log page */
2658 			arr[3] = resp_ie_l_pg(arr + 4);
2659 			break;
2660 		default:
2661 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2662 			return check_condition_result;
2663 		}
2664 	} else if (0xff == subpcode) {
2665 		arr[0] |= 0x40;
2666 		arr[1] = subpcode;
2667 		switch (pcode) {
2668 		case 0x0:	/* Supported log pages and subpages log page */
2669 			n = 4;
2670 			arr[n++] = 0x0;
2671 			arr[n++] = 0x0;		/* 0,0 page */
2672 			arr[n++] = 0x0;
2673 			arr[n++] = 0xff;	/* this page */
2674 			arr[n++] = 0xd;
2675 			arr[n++] = 0x0;		/* Temperature */
2676 			arr[n++] = 0xd;
2677 			arr[n++] = 0x1;		/* Environment reporting */
2678 			arr[n++] = 0xd;
2679 			arr[n++] = 0xff;	/* all 0xd subpages */
2680 			arr[n++] = 0x2f;
2681 			arr[n++] = 0x0;	/* Informational exceptions */
2682 			arr[n++] = 0x2f;
2683 			arr[n++] = 0xff;	/* all 0x2f subpages */
2684 			arr[3] = n - 4;
2685 			break;
2686 		case 0xd:	/* Temperature subpages */
2687 			n = 4;
2688 			arr[n++] = 0xd;
2689 			arr[n++] = 0x0;		/* Temperature */
2690 			arr[n++] = 0xd;
2691 			arr[n++] = 0x1;		/* Environment reporting */
2692 			arr[n++] = 0xd;
2693 			arr[n++] = 0xff;	/* these subpages */
2694 			arr[3] = n - 4;
2695 			break;
2696 		case 0x2f:	/* Informational exceptions subpages */
2697 			n = 4;
2698 			arr[n++] = 0x2f;
2699 			arr[n++] = 0x0;		/* Informational exceptions */
2700 			arr[n++] = 0x2f;
2701 			arr[n++] = 0xff;	/* these subpages */
2702 			arr[3] = n - 4;
2703 			break;
2704 		default:
2705 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2706 			return check_condition_result;
2707 		}
2708 	} else if (subpcode > 0) {
2709 		arr[0] |= 0x40;
2710 		arr[1] = subpcode;
2711 		if (pcode == 0xd && subpcode == 1)
2712 			arr[3] = resp_env_rep_l_spg(arr + 4);
2713 		else {
2714 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2715 			return check_condition_result;
2716 		}
2717 	} else {
2718 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2719 		return check_condition_result;
2720 	}
2721 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2722 	return fill_from_dev_buffer(scp, arr,
2723 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2724 }
2725 
2726 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2727 {
2728 	return devip->nr_zones != 0;
2729 }
2730 
2731 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2732 					unsigned long long lba)
2733 {
2734 	u32 zno = lba >> devip->zsize_shift;
2735 	struct sdeb_zone_state *zsp;
2736 
2737 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2738 		return &devip->zstate[zno];
2739 
2740 	/*
2741 	 * If the zone capacity is less than the zone size, adjust for gap
2742 	 * zones.
2743 	 */
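	/*
	 * Index arithmetic sketch: nr_conv_zones conventional zones are
	 * followed by alternating sequential/gap zone pairs, so logical
	 * slot zno maps to nr_conv_zones + 2 * (zno - nr_conv_zones) =
	 * 2 * zno - nr_conv_zones. E.g. (illustrative) nr_conv_zones = 4
	 * and zno = 10 land on zstate[16].
	 */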
2744 	zno = 2 * zno - devip->nr_conv_zones;
2745 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2746 	zsp = &devip->zstate[zno];
2747 	if (lba >= zsp->z_start + zsp->z_size)
2748 		zsp++;
2749 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2750 	return zsp;
2751 }
2752 
2753 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2754 {
2755 	return zsp->z_type == ZBC_ZTYPE_CNV;
2756 }
2757 
2758 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2759 {
2760 	return zsp->z_type == ZBC_ZTYPE_GAP;
2761 }
2762 
2763 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2764 {
2765 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2766 }
2767 
2768 static void zbc_close_zone(struct sdebug_dev_info *devip,
2769 			   struct sdeb_zone_state *zsp)
2770 {
2771 	enum sdebug_z_cond zc;
2772 
2773 	if (!zbc_zone_is_seq(zsp))
2774 		return;
2775 
2776 	zc = zsp->z_cond;
2777 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2778 		return;
2779 
2780 	if (zc == ZC2_IMPLICIT_OPEN)
2781 		devip->nr_imp_open--;
2782 	else
2783 		devip->nr_exp_open--;
2784 
2785 	if (zsp->z_wp == zsp->z_start) {
2786 		zsp->z_cond = ZC1_EMPTY;
2787 	} else {
2788 		zsp->z_cond = ZC4_CLOSED;
2789 		devip->nr_closed++;
2790 	}
2791 }
2792 
2793 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2794 {
2795 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2796 	unsigned int i;
2797 
2798 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2799 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2800 			zbc_close_zone(devip, zsp);
2801 			return;
2802 		}
2803 	}
2804 }
2805 
2806 static void zbc_open_zone(struct sdebug_dev_info *devip,
2807 			  struct sdeb_zone_state *zsp, bool explicit)
2808 {
2809 	enum sdebug_z_cond zc;
2810 
2811 	if (!zbc_zone_is_seq(zsp))
2812 		return;
2813 
2814 	zc = zsp->z_cond;
2815 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2816 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2817 		return;
2818 
2819 	/* Close an implicit open zone if necessary */
2820 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2821 		zbc_close_zone(devip, zsp);
2822 	else if (devip->max_open &&
2823 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2824 		zbc_close_imp_open_zone(devip);
2825 
2826 	if (zsp->z_cond == ZC4_CLOSED)
2827 		devip->nr_closed--;
2828 	if (explicit) {
2829 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2830 		devip->nr_exp_open++;
2831 	} else {
2832 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2833 		devip->nr_imp_open++;
2834 	}
2835 }
2836 
2837 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2838 				     struct sdeb_zone_state *zsp)
2839 {
2840 	switch (zsp->z_cond) {
2841 	case ZC2_IMPLICIT_OPEN:
2842 		devip->nr_imp_open--;
2843 		break;
2844 	case ZC3_EXPLICIT_OPEN:
2845 		devip->nr_exp_open--;
2846 		break;
2847 	default:
2848 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2849 			  zsp->z_start, zsp->z_cond);
2850 		break;
2851 	}
2852 	zsp->z_cond = ZC5_FULL;
2853 }
2854 
2855 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2856 		       unsigned long long lba, unsigned int num)
2857 {
2858 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2859 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2860 
2861 	if (!zbc_zone_is_seq(zsp))
2862 		return;
2863 
2864 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2865 		zsp->z_wp += num;
2866 		if (zsp->z_wp >= zend)
2867 			zbc_set_zone_full(devip, zsp);
2868 		return;
2869 	}
2870 
2871 	while (num) {
2872 		if (lba != zsp->z_wp)
2873 			zsp->z_non_seq_resource = true;
2874 
2875 		end = lba + num;
2876 		if (end >= zend) {
2877 			n = zend - lba;
2878 			zsp->z_wp = zend;
2879 		} else if (end > zsp->z_wp) {
2880 			n = num;
2881 			zsp->z_wp = end;
2882 		} else {
2883 			n = num;
2884 		}
2885 		if (zsp->z_wp >= zend)
2886 			zbc_set_zone_full(devip, zsp);
2887 
2888 		num -= n;
2889 		lba += n;
2890 		if (num) {
2891 			zsp++;
2892 			zend = zsp->z_start + zsp->z_size;
2893 		}
2894 	}
2895 }
2896 
2897 static int check_zbc_access_params(struct scsi_cmnd *scp,
2898 			unsigned long long lba, unsigned int num, bool write)
2899 {
2900 	struct scsi_device *sdp = scp->device;
2901 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2902 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2903 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2904 
2905 	if (!write) {
2906 		if (devip->zmodel == BLK_ZONED_HA)
2907 			return 0;
2908 		/* For host-managed, reads cannot cross zone types boundaries */
2909 		if (zsp->z_type != zsp_end->z_type) {
2910 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2911 					LBA_OUT_OF_RANGE,
2912 					READ_INVDATA_ASCQ);
2913 			return check_condition_result;
2914 		}
2915 		return 0;
2916 	}
2917 
2918 	/* Writing into a gap zone is not allowed */
2919 	if (zbc_zone_is_gap(zsp)) {
2920 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2921 				ATTEMPT_ACCESS_GAP);
2922 		return check_condition_result;
2923 	}
2924 
2925 	/* No restrictions for writes within conventional zones */
2926 	if (zbc_zone_is_conv(zsp)) {
2927 		if (!zbc_zone_is_conv(zsp_end)) {
2928 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2929 					LBA_OUT_OF_RANGE,
2930 					WRITE_BOUNDARY_ASCQ);
2931 			return check_condition_result;
2932 		}
2933 		return 0;
2934 	}
2935 
2936 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2937 		/* Writes cannot cross sequential zone boundaries */
2938 		if (zsp_end != zsp) {
2939 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2940 					LBA_OUT_OF_RANGE,
2941 					WRITE_BOUNDARY_ASCQ);
2942 			return check_condition_result;
2943 		}
2944 		/* Cannot write full zones */
2945 		if (zsp->z_cond == ZC5_FULL) {
2946 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2947 					INVALID_FIELD_IN_CDB, 0);
2948 			return check_condition_result;
2949 		}
2950 		/* Writes must be aligned to the zone WP */
2951 		if (lba != zsp->z_wp) {
2952 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2953 					LBA_OUT_OF_RANGE,
2954 					UNALIGNED_WRITE_ASCQ);
2955 			return check_condition_result;
2956 		}
2957 	}
2958 
2959 	/* Handle implicit open of closed and empty zones */
2960 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2961 		if (devip->max_open &&
2962 		    devip->nr_exp_open >= devip->max_open) {
2963 			mk_sense_buffer(scp, DATA_PROTECT,
2964 					INSUFF_RES_ASC,
2965 					INSUFF_ZONE_ASCQ);
2966 			return check_condition_result;
2967 		}
2968 		zbc_open_zone(devip, zsp, false);
2969 	}
2970 
2971 	return 0;
2972 }
2973 
2974 static inline int check_device_access_params
2975 			(struct scsi_cmnd *scp, unsigned long long lba,
2976 			 unsigned int num, bool write)
2977 {
2978 	struct scsi_device *sdp = scp->device;
2979 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2980 
2981 	if (lba + num > sdebug_capacity) {
2982 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2983 		return check_condition_result;
2984 	}
2985 	/* transfer length excessive (tie in to block limits VPD page) */
2986 	if (num > sdebug_store_sectors) {
2987 		/* needs work to find which cdb byte 'num' comes from */
2988 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2989 		return check_condition_result;
2990 	}
2991 	if (write && unlikely(sdebug_wp)) {
2992 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2993 		return check_condition_result;
2994 	}
2995 	if (sdebug_dev_is_zoned(devip))
2996 		return check_zbc_access_params(scp, lba, num, write);
2997 
2998 	return 0;
2999 }
3000 
3001 /*
3002  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3003  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3004  * that access any of the "stores" in struct sdeb_store_info should call this
3005  * function with bug_if_fake_rw set to true.
3006  */
3007 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3008 						bool bug_if_fake_rw)
3009 {
3010 	if (sdebug_fake_rw) {
3011 		BUG_ON(bug_if_fake_rw);	/* See note above */
3012 		return NULL;
3013 	}
3014 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3015 }
3016 
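/*
 * Illustrative note (module parameter names as used by this driver): when
 * loaded with "modprobe scsi_debug fake_rw=1", sdebug_fake_rw is set and
 * devip2sip() returns NULL for response functions flagged F_FAKE_RW, so no
 * backing store is dereferenced. Functions that really access a store pass
 * bug_if_fake_rw=true to catch parser-table mistakes early.
 */
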
3017 /* Returns number of bytes copied or -1 if error. */
3018 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3019 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3020 {
3021 	int ret;
3022 	u64 block, rest = 0;
3023 	enum dma_data_direction dir;
3024 	struct scsi_data_buffer *sdb = &scp->sdb;
3025 	u8 *fsp;
3026 
3027 	if (do_write) {
3028 		dir = DMA_TO_DEVICE;
3029 		write_since_sync = true;
3030 	} else {
3031 		dir = DMA_FROM_DEVICE;
3032 	}
3033 
3034 	if (!sdb->length || !sip)
3035 		return 0;
3036 	if (scp->sc_data_direction != dir)
3037 		return -1;
3038 	fsp = sip->storep;
3039 
3040 	block = do_div(lba, sdebug_store_sectors);
3041 	if (block + num > sdebug_store_sectors)
3042 		rest = block + num - sdebug_store_sectors;
3043 
3044 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3045 		   fsp + (block * sdebug_sector_size),
3046 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3047 	if (ret != (num - rest) * sdebug_sector_size)
3048 		return ret;
3049 
3050 	if (rest) {
3051 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3052 			    fsp, rest * sdebug_sector_size,
3053 			    sg_skip + ((num - rest) * sdebug_sector_size),
3054 			    do_write);
3055 	}
3056 
3057 	return ret;
3058 }
3059 
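/*
 * Worked example (hypothetical numbers): with sdebug_store_sectors == 0x4000,
 * an 8-block access starting at lba 0x3ffc leaves block == 0x3ffc after
 * do_div(), so rest == 4: the first 4 blocks are copied at the end of the
 * store and the remaining 4 wrap around to its start. This wrap is how a
 * small store can back a larger advertised capacity.
 */
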
3060 /* Returns number of bytes copied or -1 if error. */
3061 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3062 {
3063 	struct scsi_data_buffer *sdb = &scp->sdb;
3064 
3065 	if (!sdb->length)
3066 		return 0;
3067 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3068 		return -1;
3069 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3070 			      num * sdebug_sector_size, 0, true);
3071 }
3072 
3073 /* If the first num blocks of arr compare equal to the store starting at
3074  * lba, copy the top (second) half of arr into the store at lba (unless
3075  * compare_only is set) and return true. On miscompare return false. */
3076 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3077 			      const u8 *arr, bool compare_only)
3078 {
3079 	bool res;
3080 	u64 block, rest = 0;
3081 	u32 store_blks = sdebug_store_sectors;
3082 	u32 lb_size = sdebug_sector_size;
3083 	u8 *fsp = sip->storep;
3084 
3085 	block = do_div(lba, store_blks);
3086 	if (block + num > store_blks)
3087 		rest = block + num - store_blks;
3088 
3089 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3090 	if (!res)
3091 		return res;
3092 	if (rest)
3093 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3094 			      rest * lb_size);
3095 	if (!res)
3096 		return res;
3097 	if (compare_only)
3098 		return true;
3099 	arr += num * lb_size;
3100 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3101 	if (rest)
3102 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3103 	return res;
3104 }
3105 
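/*
 * The arr layout matches COMPARE AND WRITE: the verify data occupies the
 * first num blocks, the write data the following num blocks. From user space
 * this path can be exercised with sg3_utils (illustrative invocation,
 * assuming /dev/sg1 is a scsi_debug LU):
 *   sg_compare_and_write --in=caw.bin --lba=0x10 --num=1 /dev/sg1
 */
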
3106 static __be16 dif_compute_csum(const void *buf, int len)
3107 {
3108 	__be16 csum;
3109 
3110 	if (sdebug_guard)
3111 		csum = (__force __be16)ip_compute_csum(buf, len);
3112 	else
3113 		csum = cpu_to_be16(crc_t10dif(buf, len));
3114 
3115 	return csum;
3116 }
3117 
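/*
 * The guard type follows this driver's "guard" module parameter: guard=0
 * (the default) selects the T10 CRC via crc_t10dif(), guard=1 the IP
 * checksum. Illustrative setup for a DIF/DIX run with IP-checksum guards:
 *   modprobe scsi_debug dif=1 dix=1 guard=1
 */
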
3118 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3119 		      sector_t sector, u32 ei_lba)
3120 {
3121 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3122 
3123 	if (sdt->guard_tag != csum) {
3124 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3125 			(unsigned long)sector,
3126 			be16_to_cpu(sdt->guard_tag),
3127 			be16_to_cpu(csum));
3128 		return 0x01;
3129 	}
3130 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3131 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3132 		pr_err("REF check failed on sector %lu\n",
3133 			(unsigned long)sector);
3134 		return 0x03;
3135 	}
3136 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3137 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3138 		pr_err("REF check failed on sector %lu\n",
3139 			(unsigned long)sector);
3140 		return 0x03;
3141 	}
3142 	return 0;
3143 }
3144 
3145 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3146 			  unsigned int sectors, bool read)
3147 {
3148 	size_t resid;
3149 	void *paddr;
3150 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3151 						scp->device->hostdata, true);
3152 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3153 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3154 	struct sg_mapping_iter miter;
3155 
3156 	/* Bytes of protection data to copy into sgl */
3157 	resid = sectors * sizeof(*dif_storep);
3158 
3159 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3160 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3161 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3162 
3163 	while (sg_miter_next(&miter) && resid > 0) {
3164 		size_t len = min_t(size_t, miter.length, resid);
3165 		void *start = dif_store(sip, sector);
3166 		size_t rest = 0;
3167 
3168 		if (dif_store_end < start + len)
3169 			rest = start + len - dif_store_end;
3170 
3171 		paddr = miter.addr;
3172 
3173 		if (read)
3174 			memcpy(paddr, start, len - rest);
3175 		else
3176 			memcpy(start, paddr, len - rest);
3177 
3178 		if (rest) {
3179 			if (read)
3180 				memcpy(paddr + len - rest, dif_storep, rest);
3181 			else
3182 				memcpy(dif_storep, paddr + len - rest, rest);
3183 		}
3184 
3185 		sector += len / sizeof(*dif_storep);
3186 		resid -= len;
3187 	}
3188 	sg_miter_stop(&miter);
3189 }
3190 
3191 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3192 			    unsigned int sectors, u32 ei_lba)
3193 {
3194 	int ret = 0;
3195 	unsigned int i;
3196 	sector_t sector;
3197 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3198 						scp->device->hostdata, true);
3199 	struct t10_pi_tuple *sdt;
3200 
3201 	for (i = 0; i < sectors; i++, ei_lba++) {
3202 		sector = start_sec + i;
3203 		sdt = dif_store(sip, sector);
3204 
3205 		if (sdt->app_tag == cpu_to_be16(0xffff))
3206 			continue;
3207 
3208 		/*
3209 		 * Because scsi_debug acts as both initiator and
3210 		 * target we proceed to verify the PI even if
3211 		 * RDPROTECT=3. This is done so the "initiator" knows
3212 		 * which type of error to return. Otherwise we would
3213 		 * have to iterate over the PI twice.
3214 		 */
3215 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3216 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3217 					 sector, ei_lba);
3218 			if (ret) {
3219 				dif_errors++;
3220 				break;
3221 			}
3222 		}
3223 	}
3224 
3225 	dif_copy_prot(scp, start_sec, sectors, true);
3226 	dix_reads++;
3227 
3228 	return ret;
3229 }
3230 
3231 static inline void
3232 sdeb_read_lock(struct sdeb_store_info *sip)
3233 {
3234 	if (sdebug_no_rwlock) {
3235 		if (sip)
3236 			__acquire(&sip->macc_lck);
3237 		else
3238 			__acquire(&sdeb_fake_rw_lck);
3239 	} else {
3240 		if (sip)
3241 			read_lock(&sip->macc_lck);
3242 		else
3243 			read_lock(&sdeb_fake_rw_lck);
3244 	}
3245 }
3246 
3247 static inline void
3248 sdeb_read_unlock(struct sdeb_store_info *sip)
3249 {
3250 	if (sdebug_no_rwlock) {
3251 		if (sip)
3252 			__release(&sip->macc_lck);
3253 		else
3254 			__release(&sdeb_fake_rw_lck);
3255 	} else {
3256 		if (sip)
3257 			read_unlock(&sip->macc_lck);
3258 		else
3259 			read_unlock(&sdeb_fake_rw_lck);
3260 	}
3261 }
3262 
3263 static inline void
3264 sdeb_write_lock(struct sdeb_store_info *sip)
3265 {
3266 	if (sdebug_no_rwlock) {
3267 		if (sip)
3268 			__acquire(&sip->macc_lck);
3269 		else
3270 			__acquire(&sdeb_fake_rw_lck);
3271 	} else {
3272 		if (sip)
3273 			write_lock(&sip->macc_lck);
3274 		else
3275 			write_lock(&sdeb_fake_rw_lck);
3276 	}
3277 }
3278 
3279 static inline void
3280 sdeb_write_unlock(struct sdeb_store_info *sip)
3281 {
3282 	if (sdebug_no_rwlock) {
3283 		if (sip)
3284 			__release(&sip->macc_lck);
3285 		else
3286 			__release(&sdeb_fake_rw_lck);
3287 	} else {
3288 		if (sip)
3289 			write_unlock(&sip->macc_lck);
3290 		else
3291 			write_unlock(&sdeb_fake_rw_lck);
3292 	}
3293 }
3294 
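/*
 * With the no_rwlock=1 module parameter these four helpers become no-ops;
 * the __acquire()/__release() calls merely keep sparse's lock-context
 * tracking balanced. A NULL sip (the fake_rw case) falls back to the global
 * sdeb_fake_rw_lck so that lock/unlock pairs still match.
 */
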
3295 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3296 {
3297 	bool check_prot;
3298 	u32 num;
3299 	u32 ei_lba;
3300 	int ret;
3301 	u64 lba;
3302 	struct sdeb_store_info *sip = devip2sip(devip, true);
3303 	u8 *cmd = scp->cmnd;
3304 
3305 	switch (cmd[0]) {
3306 	case READ_16:
3307 		ei_lba = 0;
3308 		lba = get_unaligned_be64(cmd + 2);
3309 		num = get_unaligned_be32(cmd + 10);
3310 		check_prot = true;
3311 		break;
3312 	case READ_10:
3313 		ei_lba = 0;
3314 		lba = get_unaligned_be32(cmd + 2);
3315 		num = get_unaligned_be16(cmd + 7);
3316 		check_prot = true;
3317 		break;
3318 	case READ_6:
3319 		ei_lba = 0;
3320 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3321 		      (u32)(cmd[1] & 0x1f) << 16;
3322 		num = (0 == cmd[4]) ? 256 : cmd[4];
3323 		check_prot = true;
3324 		break;
3325 	case READ_12:
3326 		ei_lba = 0;
3327 		lba = get_unaligned_be32(cmd + 2);
3328 		num = get_unaligned_be32(cmd + 6);
3329 		check_prot = true;
3330 		break;
3331 	case XDWRITEREAD_10:
3332 		ei_lba = 0;
3333 		lba = get_unaligned_be32(cmd + 2);
3334 		num = get_unaligned_be16(cmd + 7);
3335 		check_prot = false;
3336 		break;
3337 	default:	/* assume READ(32) */
3338 		lba = get_unaligned_be64(cmd + 12);
3339 		ei_lba = get_unaligned_be32(cmd + 20);
3340 		num = get_unaligned_be32(cmd + 28);
3341 		check_prot = false;
3342 		break;
3343 	}
3344 	if (unlikely(have_dif_prot && check_prot)) {
3345 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3346 		    (cmd[1] & 0xe0)) {
3347 			mk_sense_invalid_opcode(scp);
3348 			return check_condition_result;
3349 		}
3350 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3351 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3352 		    (cmd[1] & 0xe0) == 0)
3353 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3354 				    "to DIF device\n");
3355 	}
3356 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3357 		     atomic_read(&sdeb_inject_pending))) {
3358 		num /= 2;
3359 		atomic_set(&sdeb_inject_pending, 0);
3360 	}
3361 
3362 	ret = check_device_access_params(scp, lba, num, false);
3363 	if (ret)
3364 		return ret;
3365 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3366 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3367 		     ((lba + num) > sdebug_medium_error_start))) {
3368 		/* claim unrecoverable read error */
3369 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3370 		/* set info field and valid bit for fixed descriptor */
3371 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3372 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3373 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3374 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3375 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3376 		}
3377 		scsi_set_resid(scp, scsi_bufflen(scp));
3378 		return check_condition_result;
3379 	}
3380 
3381 	sdeb_read_lock(sip);
3382 
3383 	/* DIX + T10 DIF */
3384 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3385 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3386 		case 1: /* Guard tag error */
3387 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3388 				sdeb_read_unlock(sip);
3389 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3390 				return check_condition_result;
3391 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3392 				sdeb_read_unlock(sip);
3393 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3394 				return illegal_condition_result;
3395 			}
3396 			break;
3397 		case 3: /* Reference tag error */
3398 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3399 				sdeb_read_unlock(sip);
3400 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3401 				return check_condition_result;
3402 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3403 				sdeb_read_unlock(sip);
3404 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3405 				return illegal_condition_result;
3406 			}
3407 			break;
3408 		}
3409 	}
3410 
3411 	ret = do_device_access(sip, scp, 0, lba, num, false);
3412 	sdeb_read_unlock(sip);
3413 	if (unlikely(ret == -1))
3414 		return DID_ERROR << 16;
3415 
3416 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3417 
3418 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3419 		     atomic_read(&sdeb_inject_pending))) {
3420 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3421 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3422 			atomic_set(&sdeb_inject_pending, 0);
3423 			return check_condition_result;
3424 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3425 			/* Logical block guard check failed */
3426 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3427 			atomic_set(&sdeb_inject_pending, 0);
3428 			return illegal_condition_result;
3429 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3430 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3431 			atomic_set(&sdeb_inject_pending, 0);
3432 			return illegal_condition_result;
3433 		}
3434 	}
3435 	return 0;
3436 }
3437 
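/*
 * The MEDIUM_ERROR path above can be provoked on purpose (illustrative;
 * parameter names per this driver): load with "modprobe scsi_debug opts=0x2"
 * and read a range crossing medium_error_start (OPT_MEDIUM_ERR_ADDR by
 * default) to get UNRECOVERED READ ERROR sense with the info field set.
 */
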
3438 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3439 			     unsigned int sectors, u32 ei_lba)
3440 {
3441 	int ret;
3442 	struct t10_pi_tuple *sdt;
3443 	void *daddr;
3444 	sector_t sector = start_sec;
3445 	int ppage_offset;
3446 	int dpage_offset;
3447 	struct sg_mapping_iter diter;
3448 	struct sg_mapping_iter piter;
3449 
3450 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3451 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3452 
3453 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3454 			scsi_prot_sg_count(SCpnt),
3455 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3456 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3457 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3458 
3459 	/* For each protection page */
3460 	while (sg_miter_next(&piter)) {
3461 		dpage_offset = 0;
3462 		if (WARN_ON(!sg_miter_next(&diter))) {
3463 			ret = 0x01;
3464 			goto out;
3465 		}
3466 
3467 		for (ppage_offset = 0; ppage_offset < piter.length;
3468 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3469 			/* If we're at the end of the current
3470 			 * data page advance to the next one
3471 			 */
3472 			if (dpage_offset >= diter.length) {
3473 				if (WARN_ON(!sg_miter_next(&diter))) {
3474 					ret = 0x01;
3475 					goto out;
3476 				}
3477 				dpage_offset = 0;
3478 			}
3479 
3480 			sdt = piter.addr + ppage_offset;
3481 			daddr = diter.addr + dpage_offset;
3482 
3483 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3484 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3485 				if (ret)
3486 					goto out;
3487 			}
3488 
3489 			sector++;
3490 			ei_lba++;
3491 			dpage_offset += sdebug_sector_size;
3492 		}
3493 		diter.consumed = dpage_offset;
3494 		sg_miter_stop(&diter);
3495 	}
3496 	sg_miter_stop(&piter);
3497 
3498 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3499 	dix_writes++;
3500 
3501 	return 0;
3502 
3503 out:
3504 	dif_errors++;
3505 	sg_miter_stop(&diter);
3506 	sg_miter_stop(&piter);
3507 	return ret;
3508 }
3509 
3510 static unsigned long lba_to_map_index(sector_t lba)
3511 {
3512 	if (sdebug_unmap_alignment)
3513 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3514 	sector_div(lba, sdebug_unmap_granularity);
3515 	return lba;
3516 }
3517 
3518 static sector_t map_index_to_lba(unsigned long index)
3519 {
3520 	sector_t lba = index * sdebug_unmap_granularity;
3521 
3522 	if (sdebug_unmap_alignment)
3523 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3524 	return lba;
3525 }
3526 
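/*
 * Round-trip example (hypothetical parameters): with unmap_granularity=4 and
 * unmap_alignment=1, lba_to_map_index(1) computes (1 + 3) / 4 == 1, and
 * map_index_to_lba(1) yields 4 - 3 == 1 again, so provisioning block 1
 * covers LBAs 1..4. Index 0 covers the short run (just LBA 0 here) that
 * precedes the first aligned boundary.
 */
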
3527 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3528 			      unsigned int *num)
3529 {
3530 	sector_t end;
3531 	unsigned int mapped;
3532 	unsigned long index;
3533 	unsigned long next;
3534 
3535 	index = lba_to_map_index(lba);
3536 	mapped = test_bit(index, sip->map_storep);
3537 
3538 	if (mapped)
3539 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3540 	else
3541 		next = find_next_bit(sip->map_storep, map_size, index);
3542 
3543 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3544 	*num = end - lba;
3545 	return mapped;
3546 }
3547 
3548 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3549 		       unsigned int len)
3550 {
3551 	sector_t end = lba + len;
3552 
3553 	while (lba < end) {
3554 		unsigned long index = lba_to_map_index(lba);
3555 
3556 		if (index < map_size)
3557 			set_bit(index, sip->map_storep);
3558 
3559 		lba = map_index_to_lba(index + 1);
3560 	}
3561 }
3562 
3563 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3564 			 unsigned int len)
3565 {
3566 	sector_t end = lba + len;
3567 	u8 *fsp = sip->storep;
3568 
3569 	while (lba < end) {
3570 		unsigned long index = lba_to_map_index(lba);
3571 
3572 		if (lba == map_index_to_lba(index) &&
3573 		    lba + sdebug_unmap_granularity <= end &&
3574 		    index < map_size) {
3575 			clear_bit(index, sip->map_storep);
3576 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3577 				memset(fsp + lba * sdebug_sector_size,
3578 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3579 				       sdebug_sector_size *
3580 				       sdebug_unmap_granularity);
3581 			}
3582 			if (sip->dif_storep) {
3583 				memset(sip->dif_storep + lba, 0xff,
3584 				       sizeof(*sip->dif_storep) *
3585 				       sdebug_unmap_granularity);
3586 			}
3587 		}
3588 		lba = map_index_to_lba(index + 1);
3589 	}
3590 }
3591 
3592 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3593 {
3594 	bool check_prot;
3595 	u32 num;
3596 	u32 ei_lba;
3597 	int ret;
3598 	u64 lba;
3599 	struct sdeb_store_info *sip = devip2sip(devip, true);
3600 	u8 *cmd = scp->cmnd;
3601 
3602 	switch (cmd[0]) {
3603 	case WRITE_16:
3604 		ei_lba = 0;
3605 		lba = get_unaligned_be64(cmd + 2);
3606 		num = get_unaligned_be32(cmd + 10);
3607 		check_prot = true;
3608 		break;
3609 	case WRITE_10:
3610 		ei_lba = 0;
3611 		lba = get_unaligned_be32(cmd + 2);
3612 		num = get_unaligned_be16(cmd + 7);
3613 		check_prot = true;
3614 		break;
3615 	case WRITE_6:
3616 		ei_lba = 0;
3617 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3618 		      (u32)(cmd[1] & 0x1f) << 16;
3619 		num = (0 == cmd[4]) ? 256 : cmd[4];
3620 		check_prot = true;
3621 		break;
3622 	case WRITE_12:
3623 		ei_lba = 0;
3624 		lba = get_unaligned_be32(cmd + 2);
3625 		num = get_unaligned_be32(cmd + 6);
3626 		check_prot = true;
3627 		break;
3628 	case 0x53:	/* XDWRITEREAD(10) */
3629 		ei_lba = 0;
3630 		lba = get_unaligned_be32(cmd + 2);
3631 		num = get_unaligned_be16(cmd + 7);
3632 		check_prot = false;
3633 		break;
3634 	default:	/* assume WRITE(32) */
3635 		lba = get_unaligned_be64(cmd + 12);
3636 		ei_lba = get_unaligned_be32(cmd + 20);
3637 		num = get_unaligned_be32(cmd + 28);
3638 		check_prot = false;
3639 		break;
3640 	}
3641 	if (unlikely(have_dif_prot && check_prot)) {
3642 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3643 		    (cmd[1] & 0xe0)) {
3644 			mk_sense_invalid_opcode(scp);
3645 			return check_condition_result;
3646 		}
3647 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3648 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3649 		    (cmd[1] & 0xe0) == 0)
3650 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3651 				    "to DIF device\n");
3652 	}
3653 
3654 	sdeb_write_lock(sip);
3655 	ret = check_device_access_params(scp, lba, num, true);
3656 	if (ret) {
3657 		sdeb_write_unlock(sip);
3658 		return ret;
3659 	}
3660 
3661 	/* DIX + T10 DIF */
3662 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3663 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3664 		case 1: /* Guard tag error */
3665 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3666 				sdeb_write_unlock(sip);
3667 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3668 				return illegal_condition_result;
3669 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3670 				sdeb_write_unlock(sip);
3671 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3672 				return check_condition_result;
3673 			}
3674 			break;
3675 		case 3: /* Reference tag error */
3676 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3677 				sdeb_write_unlock(sip);
3678 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3679 				return illegal_condition_result;
3680 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3681 				sdeb_write_unlock(sip);
3682 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3683 				return check_condition_result;
3684 			}
3685 			break;
3686 		}
3687 	}
3688 
3689 	ret = do_device_access(sip, scp, 0, lba, num, true);
3690 	if (unlikely(scsi_debug_lbp()))
3691 		map_region(sip, lba, num);
3692 	/* If ZBC zone then bump its write pointer */
3693 	if (sdebug_dev_is_zoned(devip))
3694 		zbc_inc_wp(devip, lba, num);
3695 	sdeb_write_unlock(sip);
3696 	if (unlikely(-1 == ret))
3697 		return DID_ERROR << 16;
3698 	else if (unlikely(sdebug_verbose &&
3699 			  (ret < (num * sdebug_sector_size))))
3700 		sdev_printk(KERN_INFO, scp->device,
3701 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3702 			    my_name, num * sdebug_sector_size, ret);
3703 
3704 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3705 		     atomic_read(&sdeb_inject_pending))) {
3706 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3707 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3708 			atomic_set(&sdeb_inject_pending, 0);
3709 			return check_condition_result;
3710 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3711 			/* Logical block guard check failed */
3712 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3713 			atomic_set(&sdeb_inject_pending, 0);
3714 			return illegal_condition_result;
3715 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3716 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3717 			atomic_set(&sdeb_inject_pending, 0);
3718 			return illegal_condition_result;
3719 		}
3720 	}
3721 	return 0;
3722 }
3723 
3724 /*
3725  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3726  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3727  */
3728 static int resp_write_scat(struct scsi_cmnd *scp,
3729 			   struct sdebug_dev_info *devip)
3730 {
3731 	u8 *cmd = scp->cmnd;
3732 	u8 *lrdp = NULL;
3733 	u8 *up;
3734 	struct sdeb_store_info *sip = devip2sip(devip, true);
3735 	u8 wrprotect;
3736 	u16 lbdof, num_lrd, k;
3737 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3738 	u32 lb_size = sdebug_sector_size;
3739 	u32 ei_lba;
3740 	u64 lba;
3741 	int ret, res;
3742 	bool is_16;
3743 	static const u32 lrd_size = 32; /* + parameter list header size */
3744 
3745 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3746 		is_16 = false;
3747 		wrprotect = (cmd[10] >> 5) & 0x7;
3748 		lbdof = get_unaligned_be16(cmd + 12);
3749 		num_lrd = get_unaligned_be16(cmd + 16);
3750 		bt_len = get_unaligned_be32(cmd + 28);
3751 	} else {        /* that leaves WRITE SCATTERED(16) */
3752 		is_16 = true;
3753 		wrprotect = (cmd[2] >> 5) & 0x7;
3754 		lbdof = get_unaligned_be16(cmd + 4);
3755 		num_lrd = get_unaligned_be16(cmd + 8);
3756 		bt_len = get_unaligned_be32(cmd + 10);
3757 		if (unlikely(have_dif_prot)) {
3758 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3759 			    wrprotect) {
3760 				mk_sense_invalid_opcode(scp);
3761 				return illegal_condition_result;
3762 			}
3763 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3764 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3765 			     wrprotect == 0)
3766 				sdev_printk(KERN_ERR, scp->device,
3767 					    "Unprotected WR to DIF device\n");
3768 		}
3769 	}
3770 	if ((num_lrd == 0) || (bt_len == 0))
3771 		return 0;       /* T10 says these do-nothings are not errors */
3772 	if (lbdof == 0) {
3773 		if (sdebug_verbose)
3774 			sdev_printk(KERN_INFO, scp->device,
3775 				"%s: %s: LB Data Offset field bad\n",
3776 				my_name, __func__);
3777 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3778 		return illegal_condition_result;
3779 	}
3780 	lbdof_blen = lbdof * lb_size;
3781 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3782 		if (sdebug_verbose)
3783 			sdev_printk(KERN_INFO, scp->device,
3784 				"%s: %s: LBA range descriptors don't fit\n",
3785 				my_name, __func__);
3786 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3787 		return illegal_condition_result;
3788 	}
3789 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3790 	if (lrdp == NULL)
3791 		return SCSI_MLQUEUE_HOST_BUSY;
3792 	if (sdebug_verbose)
3793 		sdev_printk(KERN_INFO, scp->device,
3794 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3795 			my_name, __func__, lbdof_blen);
3796 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3797 	if (res == -1) {
3798 		ret = DID_ERROR << 16;
3799 		goto err_out;
3800 	}
3801 
3802 	sdeb_write_lock(sip);
3803 	sg_off = lbdof_blen;
3804 	/* Per spec, the Buffer Transfer Length field counts the LBs in dout */
3805 	cum_lb = 0;
3806 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3807 		lba = get_unaligned_be64(up + 0);
3808 		num = get_unaligned_be32(up + 8);
3809 		if (sdebug_verbose)
3810 			sdev_printk(KERN_INFO, scp->device,
3811 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3812 				my_name, __func__, k, lba, num, sg_off);
3813 		if (num == 0)
3814 			continue;
3815 		ret = check_device_access_params(scp, lba, num, true);
3816 		if (ret)
3817 			goto err_out_unlock;
3818 		num_by = num * lb_size;
3819 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3820 
3821 		if ((cum_lb + num) > bt_len) {
3822 			if (sdebug_verbose)
3823 				sdev_printk(KERN_INFO, scp->device,
3824 				    "%s: %s: sum of blocks > data provided\n",
3825 				    my_name, __func__);
3826 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3827 					0);
3828 			ret = illegal_condition_result;
3829 			goto err_out_unlock;
3830 		}
3831 
3832 		/* DIX + T10 DIF */
3833 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3834 			int prot_ret = prot_verify_write(scp, lba, num,
3835 							 ei_lba);
3836 
3837 			if (prot_ret) {
3838 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3839 						prot_ret);
3840 				ret = illegal_condition_result;
3841 				goto err_out_unlock;
3842 			}
3843 		}
3844 
3845 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3846 		/* If ZBC zone then bump its write pointer */
3847 		if (sdebug_dev_is_zoned(devip))
3848 			zbc_inc_wp(devip, lba, num);
3849 		if (unlikely(scsi_debug_lbp()))
3850 			map_region(sip, lba, num);
3851 		if (unlikely(-1 == ret)) {
3852 			ret = DID_ERROR << 16;
3853 			goto err_out_unlock;
3854 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3855 			sdev_printk(KERN_INFO, scp->device,
3856 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3857 			    my_name, num_by, ret);
3858 
3859 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3860 			     atomic_read(&sdeb_inject_pending))) {
3861 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3862 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3863 				atomic_set(&sdeb_inject_pending, 0);
3864 				ret = check_condition_result;
3865 				goto err_out_unlock;
3866 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3867 				/* Logical block guard check failed */
3868 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3869 				atomic_set(&sdeb_inject_pending, 0);
3870 				ret = illegal_condition_result;
3871 				goto err_out_unlock;
3872 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3873 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3874 				atomic_set(&sdeb_inject_pending, 0);
3875 				ret = illegal_condition_result;
3876 				goto err_out_unlock;
3877 			}
3878 		}
3879 		sg_off += num_by;
3880 		cum_lb += num;
3881 	}
3882 	ret = 0;
3883 err_out_unlock:
3884 	sdeb_write_unlock(sip);
3885 err_out:
3886 	kfree(lrdp);
3887 	return ret;
3888 }
3889 
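/*
 * Parameter data layout reminder: the data-out buffer begins with a 32-byte
 * header, followed by num_lrd 32-byte LBA range descriptors (8-byte LBA at
 * offset 0, 4-byte block count at offset 8), while the write data itself
 * starts lbdof logical blocks into the buffer.
 */
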
3890 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3891 			   u32 ei_lba, bool unmap, bool ndob)
3892 {
3893 	struct scsi_device *sdp = scp->device;
3894 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3895 	unsigned long long i;
3896 	u64 block, lbaa;
3897 	u32 lb_size = sdebug_sector_size;
3898 	int ret;
3899 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3900 						scp->device->hostdata, true);
3901 	u8 *fs1p;
3902 	u8 *fsp;
3903 
3904 	sdeb_write_lock(sip);
3905 
3906 	ret = check_device_access_params(scp, lba, num, true);
3907 	if (ret) {
3908 		sdeb_write_unlock(sip);
3909 		return ret;
3910 	}
3911 
3912 	if (unmap && scsi_debug_lbp()) {
3913 		unmap_region(sip, lba, num);
3914 		goto out;
3915 	}
3916 	lbaa = lba;
3917 	block = do_div(lbaa, sdebug_store_sectors);
3918 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3919 	fsp = sip->storep;
3920 	fs1p = fsp + (block * lb_size);
3921 	if (ndob) {
3922 		memset(fs1p, 0, lb_size);
3923 		ret = 0;
3924 	} else
3925 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3926 
3927 	if (-1 == ret) {
3928 		sdeb_write_unlock(sip);
3929 		return DID_ERROR << 16;
3930 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3931 		sdev_printk(KERN_INFO, scp->device,
3932 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3933 			    my_name, "write same", lb_size, ret);
3934 
3935 	/* Copy first sector to remaining blocks */
3936 	for (i = 1 ; i < num ; i++) {
3937 		lbaa = lba + i;
3938 		block = do_div(lbaa, sdebug_store_sectors);
3939 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3940 	}
3941 	if (scsi_debug_lbp())
3942 		map_region(sip, lba, num);
3943 	/* If ZBC zone then bump its write pointer */
3944 	if (sdebug_dev_is_zoned(devip))
3945 		zbc_inc_wp(devip, lba, num);
3946 out:
3947 	sdeb_write_unlock(sip);
3948 
3949 	return 0;
3950 }
3951 
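/*
 * Illustrative user-space exercise (assuming sg3_utils and that /dev/sg1 is
 * a scsi_debug LU loaded with lbpws=1):
 *   sg_write_same --16 --unmap --lba=0x20 --num=64 /dev/sg1
 * With NDOB set instead, no data-out buffer is transferred and the first
 * block is zero-filled before being replicated.
 */
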
3952 static int resp_write_same_10(struct scsi_cmnd *scp,
3953 			      struct sdebug_dev_info *devip)
3954 {
3955 	u8 *cmd = scp->cmnd;
3956 	u32 lba;
3957 	u16 num;
3958 	u32 ei_lba = 0;
3959 	bool unmap = false;
3960 
3961 	if (cmd[1] & 0x8) {
3962 		if (sdebug_lbpws10 == 0) {
3963 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3964 			return check_condition_result;
3965 		} else
3966 			unmap = true;
3967 	}
3968 	lba = get_unaligned_be32(cmd + 2);
3969 	num = get_unaligned_be16(cmd + 7);
3970 	if (num > sdebug_write_same_length) {
3971 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3972 		return check_condition_result;
3973 	}
3974 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3975 }
3976 
3977 static int resp_write_same_16(struct scsi_cmnd *scp,
3978 			      struct sdebug_dev_info *devip)
3979 {
3980 	u8 *cmd = scp->cmnd;
3981 	u64 lba;
3982 	u32 num;
3983 	u32 ei_lba = 0;
3984 	bool unmap = false;
3985 	bool ndob = false;
3986 
3987 	if (cmd[1] & 0x8) {	/* UNMAP */
3988 		if (sdebug_lbpws == 0) {
3989 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3990 			return check_condition_result;
3991 		} else
3992 			unmap = true;
3993 	}
3994 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3995 		ndob = true;
3996 	lba = get_unaligned_be64(cmd + 2);
3997 	num = get_unaligned_be32(cmd + 10);
3998 	if (num > sdebug_write_same_length) {
3999 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4000 		return check_condition_result;
4001 	}
4002 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4003 }
4004 
4005 /* Note the mode field is in the same position as the (lower) service action
4006  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4007  * each mode of this command should be reported separately; left for later. */
4008 static int resp_write_buffer(struct scsi_cmnd *scp,
4009 			     struct sdebug_dev_info *devip)
4010 {
4011 	u8 *cmd = scp->cmnd;
4012 	struct scsi_device *sdp = scp->device;
4013 	struct sdebug_dev_info *dp;
4014 	u8 mode;
4015 
4016 	mode = cmd[1] & 0x1f;
4017 	switch (mode) {
4018 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4019 		/* set UAs on this device only */
4020 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4021 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4022 		break;
4023 	case 0x5:	/* download MC, save and ACT */
4024 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4025 		break;
4026 	case 0x6:	/* download MC with offsets and ACT */
4027 		/* set UAs on most devices (LUs) in this target */
4028 		list_for_each_entry(dp,
4029 				    &devip->sdbg_host->dev_info_list,
4030 				    dev_list)
4031 			if (dp->target == sdp->id) {
4032 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4033 				if (devip != dp)
4034 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4035 						dp->uas_bm);
4036 			}
4037 		break;
4038 	case 0x7:	/* download MC with offsets, save, and ACT */
4039 		/* set UA on all devices (LUs) in this target */
4040 		list_for_each_entry(dp,
4041 				    &devip->sdbg_host->dev_info_list,
4042 				    dev_list)
4043 			if (dp->target == sdp->id)
4044 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4045 					dp->uas_bm);
4046 		break;
4047 	default:
4048 		/* do nothing for this command for other mode values */
4049 		break;
4050 	}
4051 	return 0;
4052 }
4053 
4054 static int resp_comp_write(struct scsi_cmnd *scp,
4055 			   struct sdebug_dev_info *devip)
4056 {
4057 	u8 *cmd = scp->cmnd;
4058 	u8 *arr;
4059 	struct sdeb_store_info *sip = devip2sip(devip, true);
4060 	u64 lba;
4061 	u32 dnum;
4062 	u32 lb_size = sdebug_sector_size;
4063 	u8 num;
4064 	int ret;
4065 	int retval = 0;
4066 
4067 	lba = get_unaligned_be64(cmd + 2);
4068 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4069 	if (0 == num)
4070 		return 0;	/* degenerate case, not an error */
4071 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4072 	    (cmd[1] & 0xe0)) {
4073 		mk_sense_invalid_opcode(scp);
4074 		return check_condition_result;
4075 	}
4076 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4077 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4078 	    (cmd[1] & 0xe0) == 0)
4079 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4080 			    "to DIF device\n");
4081 	ret = check_device_access_params(scp, lba, num, false);
4082 	if (ret)
4083 		return ret;
4084 	dnum = 2 * num;
4085 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4086 	if (NULL == arr) {
4087 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4088 				INSUFF_RES_ASCQ);
4089 		return check_condition_result;
4090 	}
4091 
4092 	sdeb_write_lock(sip);
4093 
4094 	ret = do_dout_fetch(scp, dnum, arr);
4095 	if (ret == -1) {
4096 		retval = DID_ERROR << 16;
4097 		goto cleanup;
4098 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4099 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4100 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4101 			    dnum * lb_size, ret);
4102 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4103 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4104 		retval = check_condition_result;
4105 		goto cleanup;
4106 	}
4107 	if (scsi_debug_lbp())
4108 		map_region(sip, lba, num);
4109 cleanup:
4110 	sdeb_write_unlock(sip);
4111 	kfree(arr);
4112 	return retval;
4113 }
4114 
4115 struct unmap_block_desc {
4116 	__be64	lba;
4117 	__be32	blocks;
4118 	__be32	__reserved;
4119 };
4120 
4121 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4122 {
4123 	unsigned char *buf;
4124 	struct unmap_block_desc *desc;
4125 	struct sdeb_store_info *sip = devip2sip(devip, true);
4126 	unsigned int i, payload_len, descriptors;
4127 	int ret;
4128 
4129 	if (!scsi_debug_lbp())
4130 		return 0;	/* fib and say it's done */
4131 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4132 	BUG_ON(scsi_bufflen(scp) != payload_len);
4133 
4134 	descriptors = (payload_len - 8) / 16;
4135 	if (descriptors > sdebug_unmap_max_desc) {
4136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4137 		return check_condition_result;
4138 	}
4139 
4140 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4141 	if (!buf) {
4142 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4143 				INSUFF_RES_ASCQ);
4144 		return check_condition_result;
4145 	}
4146 
4147 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4148 
4149 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4150 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4151 
4152 	desc = (void *)&buf[8];
4153 
4154 	sdeb_write_lock(sip);
4155 
4156 	for (i = 0 ; i < descriptors ; i++) {
4157 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4158 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4159 
4160 		ret = check_device_access_params(scp, lba, num, true);
4161 		if (ret)
4162 			goto out;
4163 
4164 		unmap_region(sip, lba, num);
4165 	}
4166 
4167 	ret = 0;
4168 
4169 out:
4170 	sdeb_write_unlock(sip);
4171 	kfree(buf);
4172 
4173 	return ret;
4174 }
4175 
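/*
 * Illustrative invocation (assuming sg3_utils): unmap two ranges with one
 * command, each descriptor being a 16-byte {LBA, count, reserved} tuple
 * after the 8-byte parameter list header:
 *   sg_unmap --lba=0x0,0x400 --num=32,32 /dev/sg1
 */
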
4176 #define SDEBUG_GET_LBA_STATUS_LEN 32
4177 
4178 static int resp_get_lba_status(struct scsi_cmnd *scp,
4179 			       struct sdebug_dev_info *devip)
4180 {
4181 	u8 *cmd = scp->cmnd;
4182 	u64 lba;
4183 	u32 alloc_len, mapped, num;
4184 	int ret;
4185 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4186 
4187 	lba = get_unaligned_be64(cmd + 2);
4188 	alloc_len = get_unaligned_be32(cmd + 10);
4189 
4190 	if (alloc_len < 24)
4191 		return 0;
4192 
4193 	ret = check_device_access_params(scp, lba, 1, false);
4194 	if (ret)
4195 		return ret;
4196 
4197 	if (scsi_debug_lbp()) {
4198 		struct sdeb_store_info *sip = devip2sip(devip, true);
4199 
4200 		mapped = map_state(sip, lba, &num);
4201 	} else {
4202 		mapped = 1;
4203 		/* following just in case virtual_gb changed */
4204 		sdebug_capacity = get_sdebug_capacity();
4205 		if (sdebug_capacity - lba <= 0xffffffff)
4206 			num = sdebug_capacity - lba;
4207 		else
4208 			num = 0xffffffff;
4209 	}
4210 
4211 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4212 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4213 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4214 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4215 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4216 
4217 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4218 }
4219 
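/*
 * Illustrative query (assuming sg3_utils): report the provisioning state of
 * the region containing a given LBA; the response built here always carries
 * a single LBA status descriptor:
 *   sg_get_lba_status --lba=0x1000 /dev/sg1
 */
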
4220 static int resp_sync_cache(struct scsi_cmnd *scp,
4221 			   struct sdebug_dev_info *devip)
4222 {
4223 	int res = 0;
4224 	u64 lba;
4225 	u32 num_blocks;
4226 	u8 *cmd = scp->cmnd;
4227 
4228 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4229 		lba = get_unaligned_be32(cmd + 2);
4230 		num_blocks = get_unaligned_be16(cmd + 7);
4231 	} else {				/* SYNCHRONIZE_CACHE(16) */
4232 		lba = get_unaligned_be64(cmd + 2);
4233 		num_blocks = get_unaligned_be32(cmd + 10);
4234 	}
4235 	if (lba + num_blocks > sdebug_capacity) {
4236 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4237 		return check_condition_result;
4238 	}
4239 	if (!write_since_sync || (cmd[1] & 0x2))
4240 		res = SDEG_RES_IMMED_MASK;
4241 	else		/* delay if write_since_sync and IMMED clear */
4242 		write_since_sync = false;
4243 	return res;
4244 }
4245 
4246 /*
4247  * Assuming LBA+num_blocks is not out-of-range, this function returns
4248  * CONDITION MET if the specified blocks will fit (or already are) in the
4249  * cache, and a GOOD status otherwise. Model a disk with a big cache and
4250  * always yield CONDITION MET. As a side effect, try to bring the backing
4251  * range in main memory into the cache associated with the CPU(s).
4252  */
4253 static int resp_pre_fetch(struct scsi_cmnd *scp,
4254 			  struct sdebug_dev_info *devip)
4255 {
4256 	int res = 0;
4257 	u64 lba;
4258 	u64 block, rest = 0;
4259 	u32 nblks;
4260 	u8 *cmd = scp->cmnd;
4261 	struct sdeb_store_info *sip = devip2sip(devip, true);
4262 	u8 *fsp = sip->storep;
4263 
4264 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4265 		lba = get_unaligned_be32(cmd + 2);
4266 		nblks = get_unaligned_be16(cmd + 7);
4267 	} else {			/* PRE-FETCH(16) */
4268 		lba = get_unaligned_be64(cmd + 2);
4269 		nblks = get_unaligned_be32(cmd + 10);
4270 	}
4271 	if (lba + nblks > sdebug_capacity) {
4272 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4273 		return check_condition_result;
4274 	}
4275 	if (!fsp)
4276 		goto fini;
4277 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4278 	block = do_div(lba, sdebug_store_sectors);
4279 	if (block + nblks > sdebug_store_sectors)
4280 		rest = block + nblks - sdebug_store_sectors;
4281 
4282 	/* Try to bring the PRE-FETCH range into CPU's cache */
4283 	sdeb_read_lock(sip);
4284 	prefetch_range(fsp + (sdebug_sector_size * block),
4285 		       (nblks - rest) * sdebug_sector_size);
4286 	if (rest)
4287 		prefetch_range(fsp, rest * sdebug_sector_size);
4288 	sdeb_read_unlock(sip);
4289 fini:
4290 	if (cmd[1] & 0x2)
4291 		res = SDEG_RES_IMMED_MASK;
4292 	return res | condition_met_result;
4293 }
4294 
4295 #define RL_BUCKET_ELEMS 8
4296 
4297 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4298  * (W-LUN), the normal Linux scanning logic does not associate it with a
4299  * device (e.g. /dev/sg7). The following magic will make that association:
4300  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4301  * where <n> is a host number. If there are multiple targets in a host then
4302  * the above will associate a W-LUN to each target. To only get a W-LUN
4303  * for target 2, then use "echo '- 2 49409' > scan" .
4304  */
4305 static int resp_report_luns(struct scsi_cmnd *scp,
4306 			    struct sdebug_dev_info *devip)
4307 {
4308 	unsigned char *cmd = scp->cmnd;
4309 	unsigned int alloc_len;
4310 	unsigned char select_report;
4311 	u64 lun;
4312 	struct scsi_lun *lun_p;
4313 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4314 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4315 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4316 	unsigned int tlun_cnt;	/* total LUN count */
4317 	unsigned int rlen;	/* response length (in bytes) */
4318 	int k, j, n, res;
4319 	unsigned int off_rsp = 0;
4320 	const int sz_lun = sizeof(struct scsi_lun);
4321 
4322 	clear_luns_changed_on_target(devip);
4323 
4324 	select_report = cmd[2];
4325 	alloc_len = get_unaligned_be32(cmd + 6);
4326 
4327 	if (alloc_len < 4) {
4328 		pr_err("alloc len too small %d\n", alloc_len);
4329 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4330 		return check_condition_result;
4331 	}
4332 
4333 	switch (select_report) {
4334 	case 0:		/* all LUNs apart from W-LUNs */
4335 		lun_cnt = sdebug_max_luns;
4336 		wlun_cnt = 0;
4337 		break;
4338 	case 1:		/* only W-LUNs */
4339 		lun_cnt = 0;
4340 		wlun_cnt = 1;
4341 		break;
4342 	case 2:		/* all LUNs */
4343 		lun_cnt = sdebug_max_luns;
4344 		wlun_cnt = 1;
4345 		break;
4346 	case 0x10:	/* only administrative LUs */
4347 	case 0x11:	/* see SPC-5 */
4348 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4349 	default:
4350 		pr_debug("select report invalid %d\n", select_report);
4351 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4352 		return check_condition_result;
4353 	}
4354 
4355 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4356 		--lun_cnt;
4357 
4358 	tlun_cnt = lun_cnt + wlun_cnt;
4359 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4360 	scsi_set_resid(scp, scsi_bufflen(scp));
4361 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4362 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4363 
4364 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4365 	lun = sdebug_no_lun_0 ? 1 : 0;
4366 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4367 		memset(arr, 0, sizeof(arr));
4368 		lun_p = (struct scsi_lun *)&arr[0];
4369 		if (k == 0) {
4370 			put_unaligned_be32(rlen, &arr[0]);
4371 			++lun_p;
4372 			j = 1;
4373 		}
4374 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4375 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4376 				break;
4377 			int_to_scsilun(lun++, lun_p);
4378 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4379 				lun_p->scsi_lun[0] |= 0x40;
4380 		}
4381 		if (j < RL_BUCKET_ELEMS)
4382 			break;
4383 		n = j * sz_lun;
4384 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4385 		if (res)
4386 			return res;
4387 		off_rsp += n;
4388 	}
4389 	if (wlun_cnt) {
4390 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4391 		++j;
4392 	}
4393 	if (j > 0)
4394 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4395 	return res;
4396 }
4397 
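/*
 * The response is assembled in buckets of RL_BUCKET_ELEMS LUNs (8 bytes
 * each, so 64 bytes per bucket), keeping the stack buffer small no matter
 * how many LUNs are configured. Illustrative check (assuming sg3_utils):
 *   sg_luns --select=2 /dev/sg1
 */
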
4398 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4399 {
4400 	bool is_bytchk3 = false;
4401 	u8 bytchk;
4402 	int ret, j;
4403 	u32 vnum, a_num, off;
4404 	const u32 lb_size = sdebug_sector_size;
4405 	u64 lba;
4406 	u8 *arr;
4407 	u8 *cmd = scp->cmnd;
4408 	struct sdeb_store_info *sip = devip2sip(devip, true);
4409 
4410 	bytchk = (cmd[1] >> 1) & 0x3;
4411 	if (bytchk == 0) {
4412 		return 0;	/* always claim internal verify okay */
4413 	} else if (bytchk == 2) {
4414 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4415 		return check_condition_result;
4416 	} else if (bytchk == 3) {
4417 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4418 	}
4419 	switch (cmd[0]) {
4420 	case VERIFY_16:
4421 		lba = get_unaligned_be64(cmd + 2);
4422 		vnum = get_unaligned_be32(cmd + 10);
4423 		break;
4424 	case VERIFY:		/* is VERIFY(10) */
4425 		lba = get_unaligned_be32(cmd + 2);
4426 		vnum = get_unaligned_be16(cmd + 7);
4427 		break;
4428 	default:
4429 		mk_sense_invalid_opcode(scp);
4430 		return check_condition_result;
4431 	}
4432 	if (vnum == 0)
4433 		return 0;	/* not an error */
4434 	a_num = is_bytchk3 ? 1 : vnum;
4435 	/* Treat following check like one for read (i.e. no write) access */
4436 	ret = check_device_access_params(scp, lba, a_num, false);
4437 	if (ret)
4438 		return ret;
4439 
4440 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4441 	if (!arr) {
4442 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4443 				INSUFF_RES_ASCQ);
4444 		return check_condition_result;
4445 	}
4446 	/* Not changing store, so only need read access */
4447 	sdeb_read_lock(sip);
4448 
4449 	ret = do_dout_fetch(scp, a_num, arr);
4450 	if (ret == -1) {
4451 		ret = DID_ERROR << 16;
4452 		goto cleanup;
4453 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4454 		sdev_printk(KERN_INFO, scp->device,
4455 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4456 			    my_name, __func__, a_num * lb_size, ret);
4457 	}
4458 	if (is_bytchk3) {
4459 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4460 			memcpy(arr + off, arr, lb_size);
4461 	}
4462 	ret = 0;
4463 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4464 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4465 		ret = check_condition_result;
4466 		goto cleanup;
4467 	}
4468 cleanup:
4469 	sdeb_read_unlock(sip);
4470 	kfree(arr);
4471 	return ret;
4472 }
4473 
4474 #define RZONES_DESC_HD 64
4475 
4476 /* Report zones depending on start LBA and reporting options */
4477 static int resp_report_zones(struct scsi_cmnd *scp,
4478 			     struct sdebug_dev_info *devip)
4479 {
4480 	unsigned int rep_max_zones, nrz = 0;
4481 	int ret = 0;
4482 	u32 alloc_len, rep_opts, rep_len;
4483 	bool partial;
4484 	u64 lba, zs_lba;
4485 	u8 *arr = NULL, *desc;
4486 	u8 *cmd = scp->cmnd;
4487 	struct sdeb_zone_state *zsp = NULL;
4488 	struct sdeb_store_info *sip = devip2sip(devip, false);
4489 
4490 	if (!sdebug_dev_is_zoned(devip)) {
4491 		mk_sense_invalid_opcode(scp);
4492 		return check_condition_result;
4493 	}
4494 	zs_lba = get_unaligned_be64(cmd + 2);
4495 	alloc_len = get_unaligned_be32(cmd + 10);
4496 	if (alloc_len == 0)
4497 		return 0;	/* not an error */
4498 	rep_opts = cmd[14] & 0x3f;
4499 	partial = cmd[14] & 0x80;
4500 
4501 	if (zs_lba >= sdebug_capacity) {
4502 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4503 		return check_condition_result;
4504 	}
4505 
4506 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4507 
4508 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4509 	if (!arr) {
4510 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4511 				INSUFF_RES_ASCQ);
4512 		return check_condition_result;
4513 	}
4514 
4515 	sdeb_read_lock(sip);
4516 
4517 	desc = arr + 64;
4518 	for (lba = zs_lba; lba < sdebug_capacity;
4519 	     lba = zsp->z_start + zsp->z_size) {
4520 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4521 			break;
4522 		zsp = zbc_zone(devip, lba);
4523 		switch (rep_opts) {
4524 		case 0x00:
4525 			/* All zones */
4526 			break;
4527 		case 0x01:
4528 			/* Empty zones */
4529 			if (zsp->z_cond != ZC1_EMPTY)
4530 				continue;
4531 			break;
4532 		case 0x02:
4533 			/* Implicit open zones */
4534 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4535 				continue;
4536 			break;
4537 		case 0x03:
4538 			/* Explicit open zones */
4539 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4540 				continue;
4541 			break;
4542 		case 0x04:
4543 			/* Closed zones */
4544 			if (zsp->z_cond != ZC4_CLOSED)
4545 				continue;
4546 			break;
4547 		case 0x05:
4548 			/* Full zones */
4549 			if (zsp->z_cond != ZC5_FULL)
4550 				continue;
4551 			break;
4552 		case 0x06:
4553 		case 0x07:
4554 		case 0x10:
4555 			/*
4556 			 * Read-only, offline and reset-WP-recommended zones
4557 			 * are not emulated: no zones to report.
4558 			 */
4559 			continue;
4560 		case 0x11:
4561 			/* non-seq-resource set */
4562 			if (!zsp->z_non_seq_resource)
4563 				continue;
4564 			break;
4565 		case 0x3e:
4566 			/* All zones except gap zones. */
4567 			if (zbc_zone_is_gap(zsp))
4568 				continue;
4569 			break;
4570 		case 0x3f:
4571 			/* Not write pointer (conventional) zones */
4572 			if (zbc_zone_is_seq(zsp))
4573 				continue;
4574 			break;
4575 		default:
4576 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4577 					INVALID_FIELD_IN_CDB, 0);
4578 			ret = check_condition_result;
4579 			goto fini;
4580 		}
4581 
4582 		if (nrz < rep_max_zones) {
4583 			/* Fill zone descriptor */
4584 			desc[0] = zsp->z_type;
4585 			desc[1] = zsp->z_cond << 4;
4586 			if (zsp->z_non_seq_resource)
4587 				desc[1] |= 1 << 1;
4588 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4589 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4590 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4591 			desc += 64;
4592 		}
4593 
4594 		if (partial && nrz >= rep_max_zones)
4595 			break;
4596 
4597 		nrz++;
4598 	}
4599 
4600 	/* Report header */
4601 	/* Zone list length. */
4602 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4603 	/* Maximum LBA */
4604 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4605 	/* Zone starting LBA granularity. */
4606 	if (devip->zcap < devip->zsize)
4607 		put_unaligned_be64(devip->zsize, arr + 16);
4608 
4609 	rep_len = (unsigned long)desc - (unsigned long)arr;
4610 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4611 
4612 fini:
4613 	sdeb_read_unlock(sip);
4614 	kfree(arr);
4615 	return ret;
4616 }
4617 
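/*
 * Illustrative run (assuming sg3_utils and a zoned LU, e.g. loaded with
 * "modprobe scsi_debug zbc=managed"):
 *   sg_rep_zones --start=0 /dev/sg1
 * Each returned zone descriptor is RZONES_DESC_HD (64) bytes long.
 */
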
4618 /* Logic transplanted from tcmu-runner, file_zbc.c */
4619 static void zbc_open_all(struct sdebug_dev_info *devip)
4620 {
4621 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4622 	unsigned int i;
4623 
4624 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4625 		if (zsp->z_cond == ZC4_CLOSED)
4626 			zbc_open_zone(devip, &devip->zstate[i], true);
4627 	}
4628 }
4629 
4630 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4631 {
4632 	int res = 0;
4633 	u64 z_id;
4634 	enum sdebug_z_cond zc;
4635 	u8 *cmd = scp->cmnd;
4636 	struct sdeb_zone_state *zsp;
4637 	bool all = cmd[14] & 0x01;
4638 	struct sdeb_store_info *sip = devip2sip(devip, false);
4639 
4640 	if (!sdebug_dev_is_zoned(devip)) {
4641 		mk_sense_invalid_opcode(scp);
4642 		return check_condition_result;
4643 	}
4644 
4645 	sdeb_write_lock(sip);
4646 
4647 	if (all) {
4648 		/* Check if all closed zones can be open */
4649 		if (devip->max_open &&
4650 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4651 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4652 					INSUFF_ZONE_ASCQ);
4653 			res = check_condition_result;
4654 			goto fini;
4655 		}
4656 		/* Open all closed zones */
4657 		zbc_open_all(devip);
4658 		goto fini;
4659 	}
4660 
4661 	/* Open the specified zone */
4662 	z_id = get_unaligned_be64(cmd + 2);
4663 	if (z_id >= sdebug_capacity) {
4664 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4665 		res = check_condition_result;
4666 		goto fini;
4667 	}
4668 
4669 	zsp = zbc_zone(devip, z_id);
4670 	if (z_id != zsp->z_start) {
4671 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4672 		res = check_condition_result;
4673 		goto fini;
4674 	}
4675 	if (zbc_zone_is_conv(zsp)) {
4676 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4677 		res = check_condition_result;
4678 		goto fini;
4679 	}
4680 
4681 	zc = zsp->z_cond;
4682 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4683 		goto fini;
4684 
4685 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4686 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4687 				INSUFF_ZONE_ASCQ);
4688 		res = check_condition_result;
4689 		goto fini;
4690 	}
4691 
4692 	zbc_open_zone(devip, zsp, true);
4693 fini:
4694 	sdeb_write_unlock(sip);
4695 	return res;
4696 }
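/*
 * The CDB fields consumed above are shared by the CLOSE ZONE, FINISH ZONE
 * and RESET WRITE POINTER handlers that follow:
 *
 *   cmd[2..9]   zone ID: must be the start LBA of a sequential zone (an
 *               LBA merely inside a zone, or inside a conventional zone,
 *               is rejected with INVALID FIELD IN CDB);
 *   cmd[14]     bit 0 (ALL): apply the operation to all applicable zones
 *               instead of a single zone ID.
 */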
4697 
4698 static void zbc_close_all(struct sdebug_dev_info *devip)
4699 {
4700 	unsigned int i;
4701 
4702 	for (i = 0; i < devip->nr_zones; i++)
4703 		zbc_close_zone(devip, &devip->zstate[i]);
4704 }
4705 
4706 static int resp_close_zone(struct scsi_cmnd *scp,
4707 			   struct sdebug_dev_info *devip)
4708 {
4709 	int res = 0;
4710 	u64 z_id;
4711 	u8 *cmd = scp->cmnd;
4712 	struct sdeb_zone_state *zsp;
4713 	bool all = cmd[14] & 0x01;
4714 	struct sdeb_store_info *sip = devip2sip(devip, false);
4715 
4716 	if (!sdebug_dev_is_zoned(devip)) {
4717 		mk_sense_invalid_opcode(scp);
4718 		return check_condition_result;
4719 	}
4720 
4721 	sdeb_write_lock(sip);
4722 
4723 	if (all) {
4724 		zbc_close_all(devip);
4725 		goto fini;
4726 	}
4727 
4728 	/* Close the specified zone */
4729 	z_id = get_unaligned_be64(cmd + 2);
4730 	if (z_id >= sdebug_capacity) {
4731 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4732 		res = check_condition_result;
4733 		goto fini;
4734 	}
4735 
4736 	zsp = zbc_zone(devip, z_id);
4737 	if (z_id != zsp->z_start) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 	if (zbc_zone_is_conv(zsp)) {
4743 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4744 		res = check_condition_result;
4745 		goto fini;
4746 	}
4747 
4748 	zbc_close_zone(devip, zsp);
4749 fini:
4750 	sdeb_write_unlock(sip);
4751 	return res;
4752 }
4753 
4754 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4755 			    struct sdeb_zone_state *zsp, bool empty)
4756 {
4757 	enum sdebug_z_cond zc = zsp->z_cond;
4758 
4759 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4760 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4761 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4762 			zbc_close_zone(devip, zsp);
4763 		if (zsp->z_cond == ZC4_CLOSED)
4764 			devip->nr_closed--;
4765 		zsp->z_wp = zsp->z_start + zsp->z_size;
4766 		zsp->z_cond = ZC5_FULL;
4767 	}
4768 }
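/*
 * Zone condition transitions performed by zbc_finish_zone():
 *
 *   EMPTY (only when @empty is true) ------------+
 *   IMPLICIT OPEN --> (zbc_close_zone) --+       |
 *   EXPLICIT OPEN --> (zbc_close_zone) --+--> FULL, wp = start + size
 *   CLOSED ------------------------------+
 *
 * Zones in any other condition are left untouched.
 */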
4769 
4770 static void zbc_finish_all(struct sdebug_dev_info *devip)
4771 {
4772 	unsigned int i;
4773 
4774 	for (i = 0; i < devip->nr_zones; i++)
4775 		zbc_finish_zone(devip, &devip->zstate[i], false);
4776 }
4777 
4778 static int resp_finish_zone(struct scsi_cmnd *scp,
4779 			    struct sdebug_dev_info *devip)
4780 {
4781 	struct sdeb_zone_state *zsp;
4782 	int res = 0;
4783 	u64 z_id;
4784 	u8 *cmd = scp->cmnd;
4785 	bool all = cmd[14] & 0x01;
4786 	struct sdeb_store_info *sip = devip2sip(devip, false);
4787 
4788 	if (!sdebug_dev_is_zoned(devip)) {
4789 		mk_sense_invalid_opcode(scp);
4790 		return check_condition_result;
4791 	}
4792 
4793 	sdeb_write_lock(sip);
4794 
4795 	if (all) {
4796 		zbc_finish_all(devip);
4797 		goto fini;
4798 	}
4799 
4800 	/* Finish the specified zone */
4801 	z_id = get_unaligned_be64(cmd + 2);
4802 	if (z_id >= sdebug_capacity) {
4803 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4804 		res = check_condition_result;
4805 		goto fini;
4806 	}
4807 
4808 	zsp = zbc_zone(devip, z_id);
4809 	if (z_id != zsp->z_start) {
4810 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4811 		res = check_condition_result;
4812 		goto fini;
4813 	}
4814 	if (zbc_zone_is_conv(zsp)) {
4815 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4816 		res = check_condition_result;
4817 		goto fini;
4818 	}
4819 
4820 	zbc_finish_zone(devip, zsp, true);
4821 fini:
4822 	sdeb_write_unlock(sip);
4823 	return res;
4824 }
4825 
4826 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4827 			 struct sdeb_zone_state *zsp)
4828 {
4829 	enum sdebug_z_cond zc;
4830 	struct sdeb_store_info *sip = devip2sip(devip, false);
4831 
4832 	if (!zbc_zone_is_seq(zsp))
4833 		return;
4834 
4835 	zc = zsp->z_cond;
4836 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4837 		zbc_close_zone(devip, zsp);
4838 
4839 	if (zsp->z_cond == ZC4_CLOSED)
4840 		devip->nr_closed--;
4841 
4842 	if (zsp->z_wp > zsp->z_start)
4843 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4844 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4845 
4846 	zsp->z_non_seq_resource = false;
4847 	zsp->z_wp = zsp->z_start;
4848 	zsp->z_cond = ZC1_EMPTY;
4849 }
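/*
 * Note that resetting the write pointer above also zero-fills the backing
 * store between the zone start and the old write pointer, so subsequent
 * reads of the reset zone return zeroes rather than stale data.
 */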
4850 
4851 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4852 {
4853 	unsigned int i;
4854 
4855 	for (i = 0; i < devip->nr_zones; i++)
4856 		zbc_rwp_zone(devip, &devip->zstate[i]);
4857 }
4858 
4859 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4860 {
4861 	struct sdeb_zone_state *zsp;
4862 	int res = 0;
4863 	u64 z_id;
4864 	u8 *cmd = scp->cmnd;
4865 	bool all = cmd[14] & 0x01;
4866 	struct sdeb_store_info *sip = devip2sip(devip, false);
4867 
4868 	if (!sdebug_dev_is_zoned(devip)) {
4869 		mk_sense_invalid_opcode(scp);
4870 		return check_condition_result;
4871 	}
4872 
4873 	sdeb_write_lock(sip);
4874 
4875 	if (all) {
4876 		zbc_rwp_all(devip);
4877 		goto fini;
4878 	}
4879 
4880 	z_id = get_unaligned_be64(cmd + 2);
4881 	if (z_id >= sdebug_capacity) {
4882 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4883 		res = check_condition_result;
4884 		goto fini;
4885 	}
4886 
4887 	zsp = zbc_zone(devip, z_id);
4888 	if (z_id != zsp->z_start) {
4889 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4890 		res = check_condition_result;
4891 		goto fini;
4892 	}
4893 	if (zbc_zone_is_conv(zsp)) {
4894 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4895 		res = check_condition_result;
4896 		goto fini;
4897 	}
4898 
4899 	zbc_rwp_zone(devip, zsp);
4900 fini:
4901 	sdeb_write_unlock(sip);
4902 	return res;
4903 }
4904 
4905 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4906 {
4907 	u16 hwq;
4908 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4909 
4910 	hwq = blk_mq_unique_tag_to_hwq(tag);
4911 
4912 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4913 	if (WARN_ON_ONCE(hwq >= submit_queues))
4914 		hwq = 0;
4915 
4916 	return sdebug_q_arr + hwq;
4917 }
4918 
4919 static u32 get_tag(struct scsi_cmnd *cmnd)
4920 {
4921 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4922 }
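/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper 16
 * bits of the returned value and the per-queue tag into the lower 16
 * bits; get_queue() above unpacks the former with
 * blk_mq_unique_tag_to_hwq() to select the matching sdebug_queue.
 */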
4923 
4924 /* Queued (deferred) command completions converge here. */
4925 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4926 {
4927 	bool aborted = sd_dp->aborted;
4928 	int qc_idx;
4929 	int retiring = 0;
4930 	unsigned long iflags;
4931 	struct sdebug_queue *sqp;
4932 	struct sdebug_queued_cmd *sqcp;
4933 	struct scsi_cmnd *scp;
4934 	struct sdebug_dev_info *devip;
4935 
4936 	if (unlikely(aborted))
4937 		sd_dp->aborted = false;
4938 	qc_idx = sd_dp->qc_idx;
4939 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4940 	if (sdebug_statistics) {
4941 		atomic_inc(&sdebug_completions);
4942 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4943 			atomic_inc(&sdebug_miss_cpus);
4944 	}
4945 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4946 		pr_err("wild qc_idx=%d\n", qc_idx);
4947 		return;
4948 	}
4949 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4950 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4951 	sqcp = &sqp->qc_arr[qc_idx];
4952 	scp = sqcp->a_cmnd;
4953 	if (unlikely(scp == NULL)) {
4954 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4955 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4956 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4957 		return;
4958 	}
4959 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4960 	if (likely(devip))
4961 		atomic_dec(&devip->num_in_q);
4962 	else
4963 		pr_err("devip=NULL\n");
4964 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4965 		retiring = 1;
4966 
4967 	sqcp->a_cmnd = NULL;
4968 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4969 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4970 		pr_err("Unexpected completion\n");
4971 		return;
4972 	}
4973 
4974 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4975 		int k, retval;
4976 
4977 		retval = atomic_read(&retired_max_queue);
4978 		if (qc_idx >= retval) {
4979 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4980 			pr_err("index %d too large\n", retval);
4981 			return;
4982 		}
4983 		k = find_last_bit(sqp->in_use_bm, retval);
4984 		if ((k < sdebug_max_queue) || (k == retval))
4985 			atomic_set(&retired_max_queue, 0);
4986 		else
4987 			atomic_set(&retired_max_queue, k + 1);
4988 	}
4989 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4990 	if (unlikely(aborted)) {
4991 		if (sdebug_verbose)
4992 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4993 		return;
4994 	}
4995 	scsi_done(scp); /* callback to mid level */
4996 }
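/*
 * Background on the "retiring" logic above: when the user lowers
 * max_queue while commands are in flight, retired_max_queue remembers the
 * old (larger) limit. As deferred completions free queue slots above the
 * new limit, retired_max_queue is walked down to just past the highest
 * slot still in use, and is cleared to 0 once no in-use slot exceeds
 * sdebug_max_queue.
 */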
4997 
4998 /* Called when the high-resolution timer fires. */
4999 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5000 {
5001 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5002 						  hrt);
5003 	sdebug_q_cmd_complete(sd_dp);
5004 	return HRTIMER_NORESTART;
5005 }
5006 
5007 /* Called when the work queue runs the scheduled work item. */
5008 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5009 {
5010 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5011 						  ew.work);
5012 	sdebug_q_cmd_complete(sd_dp);
5013 }
5014 
5015 static bool got_shared_uuid;
5016 static uuid_t shared_uuid;
5017 
5018 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5019 {
5020 	struct sdeb_zone_state *zsp;
5021 	sector_t capacity = get_sdebug_capacity();
5022 	sector_t conv_capacity;
5023 	sector_t zstart = 0;
5024 	unsigned int i;
5025 
5026 	/*
5027 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5028 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5029 	 * use the specified zone size checking that at least 2 zones can be
5030 	 * created for the device.
5031 	 */
5032 	if (!sdeb_zbc_zone_size_mb) {
5033 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5034 			>> ilog2(sdebug_sector_size);
5035 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5036 			devip->zsize >>= 1;
5037 		if (devip->zsize < 2) {
5038 			pr_err("Device capacity too small\n");
5039 			return -EINVAL;
5040 		}
5041 	} else {
5042 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5043 			pr_err("Zone size is not a power of 2\n");
5044 			return -EINVAL;
5045 		}
5046 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5047 			>> ilog2(sdebug_sector_size);
5048 		if (devip->zsize >= capacity) {
5049 			pr_err("Zone size too large for device capacity\n");
5050 			return -EINVAL;
5051 		}
5052 	}
5053 
5054 	devip->zsize_shift = ilog2(devip->zsize);
5055 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5056 
5057 	if (sdeb_zbc_zone_cap_mb == 0) {
5058 		devip->zcap = devip->zsize;
5059 	} else {
5060 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5061 			      ilog2(sdebug_sector_size);
5062 		if (devip->zcap > devip->zsize) {
5063 			pr_err("Zone capacity too large\n");
5064 			return -EINVAL;
5065 		}
5066 	}
5067 
5068 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5069 	if (conv_capacity >= capacity) {
5070 		pr_err("Number of conventional zones too large\n");
5071 		return -EINVAL;
5072 	}
5073 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5074 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5075 			      devip->zsize_shift;
5076 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5077 
5078 	/* Add gap zones if zone capacity is smaller than the zone size */
5079 	if (devip->zcap < devip->zsize)
5080 		devip->nr_zones += devip->nr_seq_zones;
5081 
5082 	if (devip->zmodel == BLK_ZONED_HM) {
5083 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5084 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5085 			devip->max_open = (devip->nr_zones - 1) / 2;
5086 		else
5087 			devip->max_open = sdeb_zbc_max_open;
5088 	}
5089 
5090 	devip->zstate = kcalloc(devip->nr_zones,
5091 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5092 	if (!devip->zstate)
5093 		return -ENOMEM;
5094 
5095 	for (i = 0; i < devip->nr_zones; i++) {
5096 		zsp = &devip->zstate[i];
5097 
5098 		zsp->z_start = zstart;
5099 
5100 		if (i < devip->nr_conv_zones) {
5101 			zsp->z_type = ZBC_ZTYPE_CNV;
5102 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5103 			zsp->z_wp = (sector_t)-1;
5104 			zsp->z_size =
5105 				min_t(u64, devip->zsize, capacity - zstart);
5106 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5107 			if (devip->zmodel == BLK_ZONED_HM)
5108 				zsp->z_type = ZBC_ZTYPE_SWR;
5109 			else
5110 				zsp->z_type = ZBC_ZTYPE_SWP;
5111 			zsp->z_cond = ZC1_EMPTY;
5112 			zsp->z_wp = zsp->z_start;
5113 			zsp->z_size =
5114 				min_t(u64, devip->zcap, capacity - zstart);
5115 		} else {
5116 			zsp->z_type = ZBC_ZTYPE_GAP;
5117 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5118 			zsp->z_wp = (sector_t)-1;
5119 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5120 					    capacity - zstart);
5121 		}
5122 
5123 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5124 		zstart += zsp->z_size;
5125 	}
5126 
5127 	return 0;
5128 }
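/*
 * Worked example with illustrative module parameters: sector_size=512
 * (2048 sectors per MiB), zone_size_mb=4 and zone_cap_mb=3 give
 * zsize=8192 and zcap=6144 sectors. With zone_nr_conv=1 and a 32 MiB
 * (65536 sector) capacity: conv_capacity=8192, nr_seq_zones =
 * ALIGN(57344, 8192) >> 13 = 7, so nr_zones = 1 + 7 = 8, plus 7 gap
 * zones (since zcap < zsize) = 15 zones total. The layout loop then
 * emits one 8192-sector conventional zone followed by 7 pairs of a
 * 6144-sector write pointer zone and a 2048-sector gap zone.
 */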
5129 
5130 static struct sdebug_dev_info *sdebug_device_create(
5131 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5132 {
5133 	struct sdebug_dev_info *devip;
5134 
5135 	devip = kzalloc(sizeof(*devip), flags);
5136 	if (devip) {
5137 		if (sdebug_uuid_ctl == 1)
5138 			uuid_gen(&devip->lu_name);
5139 		else if (sdebug_uuid_ctl == 2) {
5140 			if (got_shared_uuid)
5141 				devip->lu_name = shared_uuid;
5142 			else {
5143 				uuid_gen(&shared_uuid);
5144 				got_shared_uuid = true;
5145 				devip->lu_name = shared_uuid;
5146 			}
5147 		}
5148 		devip->sdbg_host = sdbg_host;
5149 		if (sdeb_zbc_in_use) {
5150 			devip->zmodel = sdeb_zbc_model;
5151 			if (sdebug_device_create_zones(devip)) {
5152 				kfree(devip);
5153 				return NULL;
5154 			}
5155 		} else {
5156 			devip->zmodel = BLK_ZONED_NONE;
5157 		}
5158 		devip->sdbg_host = sdbg_host;
5159 		devip->create_ts = ktime_get_boottime();
5160 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5161 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5162 	}
5163 	return devip;
5164 }
5165 
5166 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5167 {
5168 	struct sdebug_host_info *sdbg_host;
5169 	struct sdebug_dev_info *open_devip = NULL;
5170 	struct sdebug_dev_info *devip;
5171 
5172 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5173 	if (!sdbg_host) {
5174 		pr_err("Host info NULL\n");
5175 		return NULL;
5176 	}
5177 
5178 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5179 		if ((devip->used) && (devip->channel == sdev->channel) &&
5180 		    (devip->target == sdev->id) &&
5181 		    (devip->lun == sdev->lun))
5182 			return devip;
5183 		else {
5184 			if ((!devip->used) && (!open_devip))
5185 				open_devip = devip;
5186 		}
5187 	}
5188 	if (!open_devip) { /* try and make a new one */
5189 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5190 		if (!open_devip) {
5191 			pr_err("out of memory at line %d\n", __LINE__);
5192 			return NULL;
5193 		}
5194 	}
5195 
5196 	open_devip->channel = sdev->channel;
5197 	open_devip->target = sdev->id;
5198 	open_devip->lun = sdev->lun;
5199 	open_devip->sdbg_host = sdbg_host;
5200 	atomic_set(&open_devip->num_in_q, 0);
5201 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5202 	open_devip->used = true;
5203 	return open_devip;
5204 }
5205 
5206 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5207 {
5208 	if (sdebug_verbose)
5209 		pr_info("slave_alloc <%u %u %u %llu>\n",
5210 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5211 	return 0;
5212 }
5213 
5214 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5215 {
5216 	struct sdebug_dev_info *devip =
5217 			(struct sdebug_dev_info *)sdp->hostdata;
5218 
5219 	if (sdebug_verbose)
5220 		pr_info("slave_configure <%u %u %u %llu>\n",
5221 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5222 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5223 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5224 	if (devip == NULL) {
5225 		devip = find_build_dev_info(sdp);
5226 		if (devip == NULL)
5227 			return 1;  /* no resources, will be marked offline */
5228 	}
5229 	sdp->hostdata = devip;
5230 	if (sdebug_no_uld)
5231 		sdp->no_uld_attach = 1;
5232 	config_cdb_len(sdp);
5233 	return 0;
5234 }
5235 
5236 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5237 {
5238 	struct sdebug_dev_info *devip =
5239 		(struct sdebug_dev_info *)sdp->hostdata;
5240 
5241 	if (sdebug_verbose)
5242 		pr_info("slave_destroy <%u %u %u %llu>\n",
5243 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5244 	if (devip) {
5245 		/* make this slot available for re-use */
5246 		devip->used = false;
5247 		sdp->hostdata = NULL;
5248 	}
5249 }
5250 
5251 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5252 			   enum sdeb_defer_type defer_t)
5253 {
5254 	if (!sd_dp)
5255 		return;
5256 	if (defer_t == SDEB_DEFER_HRT)
5257 		hrtimer_cancel(&sd_dp->hrt);
5258 	else if (defer_t == SDEB_DEFER_WQ)
5259 		cancel_work_sync(&sd_dp->ew.work);
5260 }
5261 
5262 /* If @cmnd is found, delete its timer or work queue and return true;
5263    else return false. */
5264 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5265 {
5266 	unsigned long iflags;
5267 	int j, k, qmax, r_qmax;
5268 	enum sdeb_defer_type l_defer_t;
5269 	struct sdebug_queue *sqp;
5270 	struct sdebug_queued_cmd *sqcp;
5271 	struct sdebug_dev_info *devip;
5272 	struct sdebug_defer *sd_dp;
5273 
5274 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5275 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5276 		qmax = sdebug_max_queue;
5277 		r_qmax = atomic_read(&retired_max_queue);
5278 		if (r_qmax > qmax)
5279 			qmax = r_qmax;
5280 		for (k = 0; k < qmax; ++k) {
5281 			if (test_bit(k, sqp->in_use_bm)) {
5282 				sqcp = &sqp->qc_arr[k];
5283 				if (cmnd != sqcp->a_cmnd)
5284 					continue;
5285 				/* found */
5286 				devip = (struct sdebug_dev_info *)
5287 						cmnd->device->hostdata;
5288 				if (devip)
5289 					atomic_dec(&devip->num_in_q);
5290 				sqcp->a_cmnd = NULL;
5291 				sd_dp = sqcp->sd_dp;
5292 				if (sd_dp) {
5293 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5294 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5295 				} else
5296 					l_defer_t = SDEB_DEFER_NONE;
5297 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5298 				stop_qc_helper(sd_dp, l_defer_t);
5299 				clear_bit(k, sqp->in_use_bm);
5300 				return true;
5301 			}
5302 		}
5303 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5304 	}
5305 	return false;
5306 }
5307 
5308 /* Deletes (stops) timers or work queues of all queued commands */
5309 static void stop_all_queued(void)
5310 {
5311 	unsigned long iflags;
5312 	int j, k;
5313 	enum sdeb_defer_type l_defer_t;
5314 	struct sdebug_queue *sqp;
5315 	struct sdebug_queued_cmd *sqcp;
5316 	struct sdebug_dev_info *devip;
5317 	struct sdebug_defer *sd_dp;
5318 
5319 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5320 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5321 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5322 			if (test_bit(k, sqp->in_use_bm)) {
5323 				sqcp = &sqp->qc_arr[k];
5324 				if (sqcp->a_cmnd == NULL)
5325 					continue;
5326 				devip = (struct sdebug_dev_info *)
5327 					sqcp->a_cmnd->device->hostdata;
5328 				if (devip)
5329 					atomic_dec(&devip->num_in_q);
5330 				sqcp->a_cmnd = NULL;
5331 				sd_dp = sqcp->sd_dp;
5332 				if (sd_dp) {
5333 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5334 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5335 				} else
5336 					l_defer_t = SDEB_DEFER_NONE;
5337 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5338 				stop_qc_helper(sd_dp, l_defer_t);
5339 				clear_bit(k, sqp->in_use_bm);
5340 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5341 			}
5342 		}
5343 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5344 	}
5345 }
5346 
5347 /* Free queued command memory on heap */
5348 static void free_all_queued(void)
5349 {
5350 	int j, k;
5351 	struct sdebug_queue *sqp;
5352 	struct sdebug_queued_cmd *sqcp;
5353 
5354 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5355 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5356 			sqcp = &sqp->qc_arr[k];
5357 			kfree(sqcp->sd_dp);
5358 			sqcp->sd_dp = NULL;
5359 		}
5360 	}
5361 }
5362 
5363 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5364 {
5365 	bool ok;
5366 
5367 	++num_aborts;
5368 	if (SCpnt) {
5369 		ok = stop_queued_cmnd(SCpnt);
5370 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5371 			sdev_printk(KERN_INFO, SCpnt->device,
5372 				    "%s: command%s found\n", __func__,
5373 				    ok ? "" : " not");
5374 	}
5375 	return SUCCESS;
5376 }
5377 
5378 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5379 {
5380 	++num_dev_resets;
5381 	if (SCpnt && SCpnt->device) {
5382 		struct scsi_device *sdp = SCpnt->device;
5383 		struct sdebug_dev_info *devip =
5384 				(struct sdebug_dev_info *)sdp->hostdata;
5385 
5386 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5387 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5388 		if (devip)
5389 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5390 	}
5391 	return SUCCESS;
5392 }
5393 
5394 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5395 {
5396 	struct sdebug_host_info *sdbg_host;
5397 	struct sdebug_dev_info *devip;
5398 	struct scsi_device *sdp;
5399 	struct Scsi_Host *hp;
5400 	int k = 0;
5401 
5402 	++num_target_resets;
5403 	if (!SCpnt)
5404 		goto lie;
5405 	sdp = SCpnt->device;
5406 	if (!sdp)
5407 		goto lie;
5408 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5409 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5410 	hp = sdp->host;
5411 	if (!hp)
5412 		goto lie;
5413 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5414 	if (sdbg_host) {
5415 		list_for_each_entry(devip,
5416 				    &sdbg_host->dev_info_list,
5417 				    dev_list)
5418 			if (devip->target == sdp->id) {
5419 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5420 				++k;
5421 			}
5422 	}
5423 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5424 		sdev_printk(KERN_INFO, sdp,
5425 			    "%s: %d device(s) found in target\n", __func__, k);
5426 lie:
5427 	return SUCCESS;
5428 }
5429 
5430 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5431 {
5432 	struct sdebug_host_info *sdbg_host;
5433 	struct sdebug_dev_info *devip;
5434 	struct scsi_device *sdp;
5435 	struct Scsi_Host *hp;
5436 	int k = 0;
5437 
5438 	++num_bus_resets;
5439 	if (!(SCpnt && SCpnt->device))
5440 		goto lie;
5441 	sdp = SCpnt->device;
5442 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5443 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5444 	hp = sdp->host;
5445 	if (hp) {
5446 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5447 		if (sdbg_host) {
5448 			list_for_each_entry(devip,
5449 					    &sdbg_host->dev_info_list,
5450 					    dev_list) {
5451 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5452 				++k;
5453 			}
5454 		}
5455 	}
5456 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5457 		sdev_printk(KERN_INFO, sdp,
5458 			    "%s: %d device(s) found in host\n", __func__, k);
5459 lie:
5460 	return SUCCESS;
5461 }
5462 
5463 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5464 {
5465 	struct sdebug_host_info *sdbg_host;
5466 	struct sdebug_dev_info *devip;
5467 	int k = 0;
5468 
5469 	++num_host_resets;
5470 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5471 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5472 	spin_lock(&sdebug_host_list_lock);
5473 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5474 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5475 				    dev_list) {
5476 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5477 			++k;
5478 		}
5479 	}
5480 	spin_unlock(&sdebug_host_list_lock);
5481 	stop_all_queued();
5482 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5483 		sdev_printk(KERN_INFO, SCpnt->device,
5484 			    "%s: %d device(s) found\n", __func__, k);
5485 	return SUCCESS;
5486 }
5487 
5488 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5489 {
5490 	struct msdos_partition *pp;
5491 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5492 	int sectors_per_part, num_sectors, k;
5493 	int heads_by_sects, start_sec, end_sec;
5494 
5495 	/* assume partition table already zeroed */
5496 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5497 		return;
5498 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5499 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5500 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5501 	}
5502 	num_sectors = (int)get_sdebug_capacity();
5503 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5504 			   / sdebug_num_parts;
5505 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5506 	starts[0] = sdebug_sectors_per;
5507 	max_part_secs = sectors_per_part;
5508 	for (k = 1; k < sdebug_num_parts; ++k) {
5509 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5510 			    * heads_by_sects;
5511 		if (starts[k] - starts[k - 1] < max_part_secs)
5512 			max_part_secs = starts[k] - starts[k - 1];
5513 	}
5514 	starts[sdebug_num_parts] = num_sectors;
5515 	starts[sdebug_num_parts + 1] = 0;
5516 
5517 	ramp[510] = 0x55;	/* magic partition markings */
5518 	ramp[511] = 0xAA;
5519 	pp = (struct msdos_partition *)(ramp + 0x1be);
5520 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5521 		start_sec = starts[k];
5522 		end_sec = starts[k] + max_part_secs - 1;
5523 		pp->boot_ind = 0;
5524 
5525 		pp->cyl = start_sec / heads_by_sects;
5526 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5527 			   / sdebug_sectors_per;
5528 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5529 
5530 		pp->end_cyl = end_sec / heads_by_sects;
5531 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5532 			       / sdebug_sectors_per;
5533 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5534 
5535 		pp->start_sect = cpu_to_le32(start_sec);
5536 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5537 		pp->sys_ind = 0x83;	/* plain Linux partition */
5538 	}
5539 }
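/*
 * The CHS fields above use the usual MBR conversion, with
 * heads_by_sects = heads * sectors_per_track:
 *
 *   cyl    = lba / heads_by_sects
 *   head   = (lba % heads_by_sects) / sectors_per_track
 *   sector = (lba % sectors_per_track) + 1	(sector numbers are 1-based)
 */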
5540 
5541 static void block_unblock_all_queues(bool block)
5542 {
5543 	int j;
5544 	struct sdebug_queue *sqp;
5545 
5546 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5547 		atomic_set(&sqp->blocked, (int)block);
5548 }
5549 
5550 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5551  * commands will be processed normally before triggers occur.
5552  */
5553 static void tweak_cmnd_count(void)
5554 {
5555 	int count, modulo;
5556 
5557 	modulo = abs(sdebug_every_nth);
5558 	if (modulo < 2)
5559 		return;
5560 	block_unblock_all_queues(true);
5561 	count = atomic_read(&sdebug_cmnd_count);
5562 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5563 	block_unblock_all_queues(false);
5564 }
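/*
 * For example, with every_nth=100 and sdebug_cmnd_count=257 the count is
 * rounded down to 200, so commands 201..299 are processed normally and
 * the next trigger fires on the 100th command after the reset.
 */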
5565 
5566 static void clear_queue_stats(void)
5567 {
5568 	atomic_set(&sdebug_cmnd_count, 0);
5569 	atomic_set(&sdebug_completions, 0);
5570 	atomic_set(&sdebug_miss_cpus, 0);
5571 	atomic_set(&sdebug_a_tsf, 0);
5572 }
5573 
5574 static bool inject_on_this_cmd(void)
5575 {
5576 	if (sdebug_every_nth == 0)
5577 		return false;
5578 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5579 }
5580 
5581 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5582 
5583 /* Complete the processing of the thread that queued a SCSI command to this
5584  * driver. It either completes the command by calling scsi_done() or
5585  * schedules an hrtimer or work queue item, then returns 0. Returns
5586  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5587  */
5588 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5589 			 int scsi_result,
5590 			 int (*pfp)(struct scsi_cmnd *,
5591 				    struct sdebug_dev_info *),
5592 			 int delta_jiff, int ndelay)
5593 {
5594 	bool new_sd_dp;
5595 	bool inject = false;
5596 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5597 	int k, num_in_q, qdepth;
5598 	unsigned long iflags;
5599 	u64 ns_from_boot = 0;
5600 	struct sdebug_queue *sqp;
5601 	struct sdebug_queued_cmd *sqcp;
5602 	struct scsi_device *sdp;
5603 	struct sdebug_defer *sd_dp;
5604 
5605 	if (unlikely(devip == NULL)) {
5606 		if (scsi_result == 0)
5607 			scsi_result = DID_NO_CONNECT << 16;
5608 		goto respond_in_thread;
5609 	}
5610 	sdp = cmnd->device;
5611 
5612 	if (delta_jiff == 0)
5613 		goto respond_in_thread;
5614 
5615 	sqp = get_queue(cmnd);
5616 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5617 	if (unlikely(atomic_read(&sqp->blocked))) {
5618 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5619 		return SCSI_MLQUEUE_HOST_BUSY;
5620 	}
5621 	num_in_q = atomic_read(&devip->num_in_q);
5622 	qdepth = cmnd->device->queue_depth;
5623 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5624 		if (scsi_result) {
5625 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5626 			goto respond_in_thread;
5627 		} else
5628 			scsi_result = device_qfull_result;
5629 	} else if (unlikely(sdebug_every_nth &&
5630 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5631 			    (scsi_result == 0))) {
5632 		if ((num_in_q == (qdepth - 1)) &&
5633 		    (atomic_inc_return(&sdebug_a_tsf) >=
5634 		     abs(sdebug_every_nth))) {
5635 			atomic_set(&sdebug_a_tsf, 0);
5636 			inject = true;
5637 			scsi_result = device_qfull_result;
5638 		}
5639 	}
5640 
5641 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5642 	if (unlikely(k >= sdebug_max_queue)) {
5643 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5644 		if (scsi_result)
5645 			goto respond_in_thread;
5646 		scsi_result = device_qfull_result;
5647 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5648 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5649 				    __func__, sdebug_max_queue);
5650 		goto respond_in_thread;
5651 	}
5652 	set_bit(k, sqp->in_use_bm);
5653 	atomic_inc(&devip->num_in_q);
5654 	sqcp = &sqp->qc_arr[k];
5655 	sqcp->a_cmnd = cmnd;
5656 	cmnd->host_scribble = (unsigned char *)sqcp;
5657 	sd_dp = sqcp->sd_dp;
5658 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5659 
5660 	if (!sd_dp) {
5661 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5662 		if (!sd_dp) {
5663 			atomic_dec(&devip->num_in_q);
5664 			clear_bit(k, sqp->in_use_bm);
5665 			return SCSI_MLQUEUE_HOST_BUSY;
5666 		}
5667 		new_sd_dp = true;
5668 	} else {
5669 		new_sd_dp = false;
5670 	}
5671 
5672 	/* Set the hostwide tag */
5673 	if (sdebug_host_max_queue)
5674 		sd_dp->hc_idx = get_tag(cmnd);
5675 
5676 	if (polled)
5677 		ns_from_boot = ktime_get_boottime_ns();
5678 
5679 	/* one of the resp_*() response functions is called here */
5680 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5681 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5682 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5683 		delta_jiff = ndelay = 0;
5684 	}
5685 	if (cmnd->result == 0 && scsi_result != 0)
5686 		cmnd->result = scsi_result;
5687 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5688 		if (atomic_read(&sdeb_inject_pending)) {
5689 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5690 			atomic_set(&sdeb_inject_pending, 0);
5691 			cmnd->result = check_condition_result;
5692 		}
5693 	}
5694 
5695 	if (unlikely(sdebug_verbose && cmnd->result))
5696 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5697 			    __func__, cmnd->result);
5698 
5699 	if (delta_jiff > 0 || ndelay > 0) {
5700 		ktime_t kt;
5701 
5702 		if (delta_jiff > 0) {
5703 			u64 ns = jiffies_to_nsecs(delta_jiff);
5704 
5705 			if (sdebug_random && ns < U32_MAX) {
5706 				ns = prandom_u32_max((u32)ns);
5707 			} else if (sdebug_random) {
5708 				ns >>= 12;	/* scale to 4 usec precision */
5709 				if (ns < U32_MAX)	/* over 4 hours max */
5710 					ns = prandom_u32_max((u32)ns);
5711 				ns <<= 12;
5712 			}
5713 			kt = ns_to_ktime(ns);
5714 		} else {	/* ndelay has a 4.2 second max */
5715 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5716 					     (u32)ndelay;
5717 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5718 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5719 
5720 				if (kt <= d) {	/* elapsed duration >= kt */
5721 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5722 					sqcp->a_cmnd = NULL;
5723 					atomic_dec(&devip->num_in_q);
5724 					clear_bit(k, sqp->in_use_bm);
5725 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5726 					if (new_sd_dp)
5727 						kfree(sd_dp);
5728 					/* call scsi_done() from this thread */
5729 					scsi_done(cmnd);
5730 					return 0;
5731 				}
5732 				/* otherwise reduce kt by elapsed time */
5733 				kt -= d;
5734 			}
5735 		}
5736 		if (polled) {
5737 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5738 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5739 			if (!sd_dp->init_poll) {
5740 				sd_dp->init_poll = true;
5741 				sqcp->sd_dp = sd_dp;
5742 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5743 				sd_dp->qc_idx = k;
5744 			}
5745 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5746 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5747 		} else {
5748 			if (!sd_dp->init_hrt) {
5749 				sd_dp->init_hrt = true;
5750 				sqcp->sd_dp = sd_dp;
5751 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5752 					     HRTIMER_MODE_REL_PINNED);
5753 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5754 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5755 				sd_dp->qc_idx = k;
5756 			}
5757 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5758 			/* schedule the invocation of scsi_done() for a later time */
5759 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5760 		}
5761 		if (sdebug_statistics)
5762 			sd_dp->issuing_cpu = raw_smp_processor_id();
5763 	} else {	/* jdelay < 0, use work queue */
5764 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5765 			     atomic_read(&sdeb_inject_pending)))
5766 			sd_dp->aborted = true;
5767 		if (polled) {
5768 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5769 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5770 			if (!sd_dp->init_poll) {
5771 				sd_dp->init_poll = true;
5772 				sqcp->sd_dp = sd_dp;
5773 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5774 				sd_dp->qc_idx = k;
5775 			}
5776 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5777 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5778 		} else {
5779 			if (!sd_dp->init_wq) {
5780 				sd_dp->init_wq = true;
5781 				sqcp->sd_dp = sd_dp;
5782 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5783 				sd_dp->qc_idx = k;
5784 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5785 			}
5786 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5787 			schedule_work(&sd_dp->ew.work);
5788 		}
5789 		if (sdebug_statistics)
5790 			sd_dp->issuing_cpu = raw_smp_processor_id();
5791 		if (unlikely(sd_dp->aborted)) {
5792 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5793 				    scsi_cmd_to_rq(cmnd)->tag);
5794 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5795 			atomic_set(&sdeb_inject_pending, 0);
5796 			sd_dp->aborted = false;
5797 		}
5798 	}
5799 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5800 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5801 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5802 	return 0;
5803 
5804 respond_in_thread:	/* call back to mid-layer using invocation thread */
5805 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5806 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5807 	if (cmnd->result == 0 && scsi_result != 0)
5808 		cmnd->result = scsi_result;
5809 	scsi_done(cmnd);
5810 	return 0;
5811 }
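/*
 * Summary of the completion paths chosen above:
 *  - devip == NULL or delta_jiff == 0: respond_in_thread, scsi_done() is
 *    invoked directly from the submitting context;
 *  - delta_jiff > 0 or ndelay > 0: a pinned hrtimer schedules the
 *    deferred completion (or the command is marked SDEB_DEFER_POLL for
 *    REQ_POLLED requests); very small ndelay values (below
 *    INCLUSIVE_TIMING_MAX_NS) may complete inline from this thread;
 *  - delta_jiff < 0: the completion runs from the system work queue.
 */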
5812 
5813 /* Note: The following macros create attribute files in the
5814    /sys/module/scsi_debug/parameters directory. Unfortunately this
5815    driver is unaware of a change and cannot trigger auxiliary actions
5816    as it can when the corresponding attribute in the
5817    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5818  */
5819 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5820 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5821 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5822 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5823 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5824 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5825 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5826 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5827 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5828 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5829 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5830 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5831 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5832 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5833 module_param_string(inq_product, sdebug_inq_product_id,
5834 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5835 module_param_string(inq_rev, sdebug_inq_product_rev,
5836 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5837 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5838 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5839 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5840 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5841 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5842 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5843 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5844 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5845 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5846 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5847 module_param_named(max_segment_size, sdebug_max_segment_size, uint, S_IRUGO);
5848 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5849 		   S_IRUGO | S_IWUSR);
5850 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5851 		   S_IRUGO | S_IWUSR);
5852 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5853 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5854 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5855 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5856 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5857 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5858 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5859 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5860 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5861 module_param_named(per_host_store, sdebug_per_host_store, bool,
5862 		   S_IRUGO | S_IWUSR);
5863 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5864 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5865 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5866 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5867 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5868 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5869 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5870 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5871 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5872 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5873 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5874 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5875 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5876 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5877 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5878 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5879 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5880 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5881 		   S_IRUGO | S_IWUSR);
5882 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5883 module_param_named(write_same_length, sdebug_write_same_length, int,
5884 		   S_IRUGO | S_IWUSR);
5885 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5886 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5887 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5888 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5889 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5890 
5891 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5892 MODULE_DESCRIPTION("SCSI debug adapter driver");
5893 MODULE_LICENSE("GPL");
5894 MODULE_VERSION(SDEBUG_VERSION);
5895 
5896 MODULE_PARM_DESC(add_host, "add n hosts; a negative value (via sysfs) removes host(s) (def=1)");
5897 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5898 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5899 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5900 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5901 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5902 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5903 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5904 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5905 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5906 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5907 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5908 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5909 MODULE_PARM_DESC(host_max_queue,
5910 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5911 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5912 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5913 		 SDEBUG_VERSION "\")");
5914 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5915 MODULE_PARM_DESC(lbprz,
5916 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5917 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5918 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5919 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5920 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5921 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5922 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5923 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5924 MODULE_PARM_DESC(max_segment_size, "max bytes in a single segment");
5925 MODULE_PARM_DESC(medium_error_count, "count of sectors to return following a MEDIUM error");
5926 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5927 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5928 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5929 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5930 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5931 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5932 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5933 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5934 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5935 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5936 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5937 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5938 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5939 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5940 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5941 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5942 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5943 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5944 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5945 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5946 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5947 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5948 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5949 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5950 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5951 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5952 MODULE_PARM_DESC(uuid_ctl,
5953 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5954 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5955 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5956 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5957 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5958 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5959 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5960 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5961 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5962 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
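/*
 * Example invocation (illustrative parameter values): a 256 MiB
 * host-managed zoned disk with 4 MiB zones, 3 MiB zone capacity and one
 * conventional zone could be created with:
 *
 *   modprobe scsi_debug dev_size_mb=256 zbc=managed zone_size_mb=4 \
 *            zone_cap_mb=3 zone_nr_conv=1
 */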
5963 
5964 #define SDEBUG_INFO_LEN 256
5965 static char sdebug_info[SDEBUG_INFO_LEN];
5966 
5967 static const char *scsi_debug_info(struct Scsi_Host *shp)
5968 {
5969 	int k;
5970 
5971 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5972 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5973 	if (k >= (SDEBUG_INFO_LEN - 1))
5974 		return sdebug_info;
5975 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5976 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5977 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5978 		  "statistics", (int)sdebug_statistics);
5979 	return sdebug_info;
5980 }
5981 
5982 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5983 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5984 				 int length)
5985 {
5986 	char arr[16];
5987 	int opts;
5988 	int minLen = length > 15 ? 15 : length;
5989 
5990 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5991 		return -EACCES;
5992 	memcpy(arr, buffer, minLen);
5993 	arr[minLen] = '\0';
5994 	if (1 != sscanf(arr, "%d", &opts))
5995 		return -EINVAL;
5996 	sdebug_opts = opts;
5997 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5998 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5999 	if (sdebug_every_nth != 0)
6000 		tweak_cmnd_count();
6001 	return length;
6002 }
6003 
6004 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6005  * same for each scsi_debug host (if more than one). Some of the counters
6006  * output here are not atomic so may be inaccurate on a busy system. */
6007 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6008 {
6009 	int f, j, l;
6010 	struct sdebug_queue *sqp;
6011 	struct sdebug_host_info *sdhp;
6012 
6013 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6014 		   SDEBUG_VERSION, sdebug_version_date);
6015 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6016 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6017 		   sdebug_opts, sdebug_every_nth);
6018 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6019 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6020 		   sdebug_sector_size, "bytes");
6021 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6022 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6023 		   num_aborts);
6024 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6025 		   num_dev_resets, num_target_resets, num_bus_resets,
6026 		   num_host_resets);
6027 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6028 		   dix_reads, dix_writes, dif_errors);
6029 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6030 		   sdebug_statistics);
6031 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6032 		   atomic_read(&sdebug_cmnd_count),
6033 		   atomic_read(&sdebug_completions),
6034 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6035 		   atomic_read(&sdebug_a_tsf),
6036 		   atomic_read(&sdeb_mq_poll_count));
6037 
6038 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6039 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6040 		seq_printf(m, "  queue %d:\n", j);
6041 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6042 		if (f != sdebug_max_queue) {
6043 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6044 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6045 				   "first,last bits", f, l);
6046 		}
6047 	}
6048 
6049 	seq_printf(m, "this host_no=%d\n", host->host_no);
6050 	if (!xa_empty(per_store_ap)) {
6051 		bool niu;
6052 		int idx;
6053 		unsigned long l_idx;
6054 		struct sdeb_store_info *sip;
6055 
6056 		seq_puts(m, "\nhost list:\n");
6057 		j = 0;
6058 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6059 			idx = sdhp->si_idx;
6060 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6061 				   sdhp->shost->host_no, idx);
6062 			++j;
6063 		}
6064 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6065 			   sdeb_most_recent_idx);
6066 		j = 0;
6067 		xa_for_each(per_store_ap, l_idx, sip) {
6068 			niu = xa_get_mark(per_store_ap, l_idx,
6069 					  SDEB_XA_NOT_IN_USE);
6070 			idx = (int)l_idx;
6071 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6072 				   (niu ? "  not_in_use" : ""));
6073 			++j;
6074 		}
6075 	}
6076 	return 0;
6077 }
6078 
6079 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6080 {
6081 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6082 }
6083 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6084  * of delay is jiffies.
6085  */
6086 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6087 			   size_t count)
6088 {
6089 	int jdelay, res;
6090 
6091 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6092 		res = count;
6093 		if (sdebug_jdelay != jdelay) {
6094 			int j, k;
6095 			struct sdebug_queue *sqp;
6096 
6097 			block_unblock_all_queues(true);
6098 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6099 			     ++j, ++sqp) {
6100 				k = find_first_bit(sqp->in_use_bm,
6101 						   sdebug_max_queue);
6102 				if (k != sdebug_max_queue) {
6103 					res = -EBUSY;   /* queued commands */
6104 					break;
6105 				}
6106 			}
6107 			if (res > 0) {
6108 				sdebug_jdelay = jdelay;
6109 				sdebug_ndelay = 0;
6110 			}
6111 			block_unblock_all_queues(false);
6112 		}
6113 		return res;
6114 	}
6115 	return -EINVAL;
6116 }
6117 static DRIVER_ATTR_RW(delay);
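/* For example (see the sysfs note above the module_param_named() block):
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * sets a 2 jiffy response delay, and fails with EBUSY while any commands
 * are still queued.
 */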
6118 
6119 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6120 {
6121 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6122 }
6123 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6124 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6125 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6126 			    size_t count)
6127 {
6128 	int ndelay, res;
6129 
6130 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6131 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6132 		res = count;
6133 		if (sdebug_ndelay != ndelay) {
6134 			int j, k;
6135 			struct sdebug_queue *sqp;
6136 
6137 			block_unblock_all_queues(true);
6138 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6139 			     ++j, ++sqp) {
6140 				k = find_first_bit(sqp->in_use_bm,
6141 						   sdebug_max_queue);
6142 				if (k != sdebug_max_queue) {
6143 					res = -EBUSY;   /* queued commands */
6144 					break;
6145 				}
6146 			}
6147 			if (res > 0) {
6148 				sdebug_ndelay = ndelay;
6149 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6150 							: DEF_JDELAY;
6151 			}
6152 			block_unblock_all_queues(false);
6153 		}
6154 		return res;
6155 	}
6156 	return -EINVAL;
6157 }
6158 static DRIVER_ATTR_RW(ndelay);
6159 
6160 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6161 {
6162 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6163 }
6164 
6165 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6166 			  size_t count)
6167 {
6168 	int opts;
6169 	char work[20];
6170 
6171 	if (sscanf(buf, "%10s", work) == 1) {
6172 		if (strncasecmp(work, "0x", 2) == 0) {
6173 			if (kstrtoint(work + 2, 16, &opts) == 0)
6174 				goto opts_done;
6175 		} else {
6176 			if (kstrtoint(work, 10, &opts) == 0)
6177 				goto opts_done;
6178 		}
6179 	}
6180 	return -EINVAL;
6181 opts_done:
6182 	sdebug_opts = opts;
6183 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6184 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6185 	tweak_cmnd_count();
6186 	return count;
6187 }
6188 static DRIVER_ATTR_RW(opts);
6189 
6190 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6191 {
6192 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6193 }
6194 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6195 			   size_t count)
6196 {
6197 	int n;
6198 
6199 	/* Cannot change from or to TYPE_ZBC with sysfs */
6200 	if (sdebug_ptype == TYPE_ZBC)
6201 		return -EINVAL;
6202 
6203 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6204 		if (n == TYPE_ZBC)
6205 			return -EINVAL;
6206 		sdebug_ptype = n;
6207 		return count;
6208 	}
6209 	return -EINVAL;
6210 }
6211 static DRIVER_ATTR_RW(ptype);
6212 
dsense_show(struct device_driver * ddp,char * buf)6213 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6214 {
6215 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6216 }
dsense_store(struct device_driver * ddp,const char * buf,size_t count)6217 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6218 			    size_t count)
6219 {
6220 	int n;
6221 
6222 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6223 		sdebug_dsense = n;
6224 		return count;
6225 	}
6226 	return -EINVAL;
6227 }
6228 static DRIVER_ATTR_RW(dsense);
6229 
fake_rw_show(struct device_driver * ddp,char * buf)6230 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6231 {
6232 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6233 }
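/*
 * Writing 0 when fake_rw was 1 sets up a backing store (re-using the first
 * store if one exists) and points all hosts at it; writing 1 when it was 0
 * erases all stores apart from the first.
 */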
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);

static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);

static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);

static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);

static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);

static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);

static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
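/* retired_max_queue remembers (highest in-use index + 1) when max_queue is
 * reduced below a tag still outstanding, so completion paths can tell when
 * the old, larger queue has fully drained. */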
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);

static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
}

static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_no_rwlock = v;
	return count;
}
static DRIVER_ATTR_RW(no_rwlock);

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);

static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);

static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);

static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);

static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);

static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
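/* Example usage from the shell, via the directory named above; this should
 * switch statistics gathering on and arm the every_nth machinery so that
 * every 100th command is eligible for the actions selected in opts:
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 *     echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */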

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

static struct device *pseudo_primary;

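/*
 * Module initialization: validate the module parameters, allocate the
 * submission queue array, optionally create the first backing store,
 * register the pseudo root device, bus and driver, then add the requested
 * number of simulated hosts.
 */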
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

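/*
 * Allocate a simulated host, bind it to the store given by per_host_idx
 * (or the first store if negative), pre-create its device info entries and
 * register it on the pseudo bus.
 */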
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		spin_lock(&sdebug_host_list_lock);
		list_del(&sdbg_host->host_list);
		spin_unlock(&sdebug_host_list_lock);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

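/*
 * Remove the most recently added host. Unless this is the final shutdown,
 * its store is marked not-in-use when no other host still references it.
 */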
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

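/* Clamp the requested queue depth to [1, SDEBUG_CANQUEUE] and apply it */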
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}

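/*
 * Returns true when the every_nth counter fires and the opts ask for a
 * (medium access) timeout, in which case the caller drops the command.
 */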
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
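/* For TEST UNIT READY while becoming ready, the time remaining (in ms) is
 * also placed in the sense-data information field (T10 20-061r2). */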
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

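/*
 * Split the hardware queues between the default map and, when
 * poll_queues > 0, a poll map; each map gets a contiguous range of the
 * submit queues.
 */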
static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}

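/*
 * Service a poll queue: walk this queue's in-use bitmap and complete every
 * command deferred with SDEB_DEFER_POLL whose completion time has passed.
 * Returns the number of commands completed.
 */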
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;

	spin_lock_irqsave(&sqp->qc_lock, iflags);

	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
	if (qc_idx >= sdebug_max_queue)
		goto unlock;

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
		if (first) {
			first = false;
			if (!test_bit(qc_idx, sqp->in_use_bm))
				continue;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (qc_idx >= sdebug_max_queue)
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;

		} else		/* ignoring non REQ_POLLED requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
				sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		num_entries++;
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
			break;
	}

unlock:
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

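/*
 * queuecommand entry point: map the opcode (and any service action) to an
 * opcode_info_arr entry, run the unit attention, strict-CDB and not-ready
 * checks, then hand the chosen resp_* function to schedule_resp() with the
 * configured delay.
 */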
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay > 1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};

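/*
 * Per-adapter probe: size the host template from the module parameters,
 * set up hardware (and optional poll) queues, advertise DIF/DIX
 * protection, then register and scan the Scsi_Host.
 */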
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	sdebug_driver_template.max_segment_size = sdebug_max_segment_size;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};