• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2018 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20190125";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
100 
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST   1
103 #define DEF_NUM_TGTS   1
104 #define DEF_MAX_LUNS   1
105 /* With these defaults, this driver will make 1 host with 1 target
106  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107  */
108 #define DEF_ATO 1
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB   8
112 #define DEF_DIF 0
113 #define DEF_DIX 0
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
147 
148 #define SDEBUG_LUN_0_VAL 0
149 
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE		1
152 #define SDEBUG_OPT_MEDIUM_ERR		2
153 #define SDEBUG_OPT_TIMEOUT		4
154 #define SDEBUG_OPT_RECOVERED_ERR	8
155 #define SDEBUG_OPT_TRANSPORT_ERR	16
156 #define SDEBUG_OPT_DIF_ERR		32
157 #define SDEBUG_OPT_DIX_ERR		64
158 #define SDEBUG_OPT_MAC_TIMEOUT		128
159 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
160 #define SDEBUG_OPT_Q_NOISE		0x200
161 #define SDEBUG_OPT_ALL_TSF		0x400
162 #define SDEBUG_OPT_RARE_TSF		0x800
163 #define SDEBUG_OPT_N_WCE		0x1000
164 #define SDEBUG_OPT_RESET_NOISE		0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
166 #define SDEBUG_OPT_HOST_BUSY		0x8000
167 #define SDEBUG_OPT_CMD_ABORT		0x10000
168 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
169 			      SDEBUG_OPT_RESET_NOISE)
170 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
171 				  SDEBUG_OPT_TRANSPORT_ERR | \
172 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
173 				  SDEBUG_OPT_SHORT_TRANSFER | \
174 				  SDEBUG_OPT_HOST_BUSY | \
175 				  SDEBUG_OPT_CMD_ABORT)
176 /* When "every_nth" > 0 then modulo "every_nth" commands:
177  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
178  *   - a RECOVERED_ERROR is simulated on successful read and write
179  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
180  *   - a TRANSPORT_ERROR is simulated on successful read and write
181  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
182  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
183  *     CMD_ABORT
184  *
185  * When "every_nth" < 0 then after "- every_nth" commands the selected
186  * error will be injected. The error will be injected on every subsequent
187  * command until some other action occurs; for example, the user writing
188  * a new value (other than -1 or 1) to every_nth:
189  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
190  */
191 
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193  * priority order. In the subset implemented here lower numbers have higher
194  * priority. The UA numbers should be a sequence starting from 0 with
195  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
204 
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206  * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
209 
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211  * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  255
224 
225 #define F_D_IN			1
226 #define F_D_OUT			2
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10
230 #define F_SKIP_UA		0x20
231 #define F_DELAY_OVERR		0x40
232 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
234 #define F_INV_OP		0x200
235 #define F_FAKE_RW		0x400
236 #define F_M_ACCESS		0x800	/* media access */
237 #define F_SSU_DELAY		0x1000
238 #define F_SYNC_DELAY		0x2000
239 
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 
/*
 * Per logical unit (LUN) state for one simulated device. Instances are
 * kept on the owning host's dev_info_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* logical unit name; NOTE(review): presumably reported when uuid_ctl set — confirm */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
	unsigned long uas_bm[1];	/* pending Unit Attentions; bit numbers are SDEBUG_UA_* */
	atomic_t num_in_q;	/* commands currently queued on this device */
	atomic_t stopped;	/* non-zero when unit stopped (cf. START STOP UNIT) */
	bool used;		/* entry is in use */
};
262 
/* One simulated SCSI host (adapter) and the list of devices behind it. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry in global sdebug_host_list */
	struct Scsi_Host *shost;	/* the mid-level host this entry shadows */
	struct device dev;		/* embedded device; see to_sdebug_host() */
	struct list_head dev_info_list;	/* child sdebug_dev_info instances */
};
269 
270 #define to_sdebug_host(d)	\
271 	container_of(d, struct sdebug_host_info, dev)
272 
/* How a command's response is being deferred: not at all, via an hrtimer,
 * or via a workqueue item (matches the hrt/ew members of sdebug_defer). */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
275 
/* Bookkeeping for one deferred (delayed) command response. Which of the
 * two mechanisms (hrtimer vs workqueue) is active is recorded in defer_t. */
struct sdebug_defer {
	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was issued on */
	bool init_hrt;	/* hrt has been initialized */
	bool init_wq;	/* ew has been initialized */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
287 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-response state for this command */
	struct scsi_cmnd *a_cmnd;	/* the command occupying this slot */
	/* per-command error-injection flags, named after the matching
	 * SDEBUG_OPT_* bits (see SDEBUG_OPT_ALL_INJECTING) */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
	unsigned int inj_cmd_abort:1;
};
302 
/* One submit queue's worth of in-flight (queued for response) commands;
 * sized by SDEBUG_CANQUEUE, see the comment above that macro. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit N set <=> qc_arr[N] occupied */
	spinlock_t qc_lock;	/* serializes access to this queue's state */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
309 
310 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
311 static atomic_t sdebug_completions;  /* count of deferred completions */
312 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
313 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
314 
/* Describes how one SCSI opcode (optionally qualified by a service action)
 * is serviced: its response callback, flags, and a per-byte cdb mask.
 * Tables of these are indexed via the SDEB_I_* values below. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
326 
327 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Indexes into opcode_info_arr[]; opcode_ind_arr[] maps raw cdb opcode
 * bytes onto these values. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
361 
362 
/* Maps each of the 256 possible cdb opcode bytes onto a SDEB_I_* index;
 * 0 (SDEB_I_INVALID_OPCODE) marks opcodes this driver does not support. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
405 
406 /*
407  * The following "response" functions return the SCSI mid-level's 4 byte
408  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
409  * command completion, they can mask their return value with
410  * SDEG_RES_IMMED_MASK .
411  */
412 #define SDEG_RES_IMMED_MASK 0x40000000
413 
414 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
435 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
436 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
437 
438 /*
439  * The following are overflow arrays for cdbs that "hit" the same index in
440  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
441  * should be placed in opcode_info_arr[], the others should be placed here.
442  */
/* MODE SENSE(6); the preferred MODE SENSE(10) is in opcode_info_arr[] */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

/* MODE SELECT(6); the preferred MODE SELECT(10) is in opcode_info_arr[] */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
521 
522 
523 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
524  * plus the terminating elements for logic that scans this table such as
525  * REPORT SUPPORTED OPERATION CODES. */
526 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
527 /* 0 */
528 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
529 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
530 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
531 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
532 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
533 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
534 	     0, 0} },					/* REPORT LUNS */
535 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
536 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
537 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
538 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
539 /* 5 */
540 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
541 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
542 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
543 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
544 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
545 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
546 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
547 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
548 	     0, 0, 0} },
549 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
550 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
551 	     0, 0} },
552 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
553 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
554 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
555 /* 10 */
556 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
557 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
558 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
559 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
560 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
561 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
562 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
563 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
564 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
565 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
566 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
567 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
568 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
569 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
570 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
571 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
572 				0xff, 0, 0xc7, 0, 0, 0, 0} },
573 /* 15 */
574 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
575 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
576 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
577 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
578 	     0, 0, 0, 0, 0, 0} },
579 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
580 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
581 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
582 	     0xff, 0xff} },
583 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
584 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
585 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
586 	     0} },
587 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
588 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
589 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
590 	     0} },
591 /* 20 */
592 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
593 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
595 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
597 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
598 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
599 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
600 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
601 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
602 /* 25 */
603 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
604 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
605 		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
606 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
607 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
608 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
609 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
610 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
611 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
612 		 0, 0, 0, 0, 0} },
613 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
614 	    resp_sync_cache, sync_cache_iarr,
615 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
616 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
617 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
618 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
619 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
620 
621 /* 30 */
622 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
623 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
624 };
625 
626 static int sdebug_add_host = DEF_NUM_HOST;
627 static int sdebug_ato = DEF_ATO;
628 static int sdebug_cdb_len = DEF_CDB_LEN;
629 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
630 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
631 static int sdebug_dif = DEF_DIF;
632 static int sdebug_dix = DEF_DIX;
633 static int sdebug_dsense = DEF_D_SENSE;
634 static int sdebug_every_nth = DEF_EVERY_NTH;
635 static int sdebug_fake_rw = DEF_FAKE_RW;
636 static unsigned int sdebug_guard = DEF_GUARD;
637 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
638 static int sdebug_max_luns = DEF_MAX_LUNS;
639 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
640 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
641 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
642 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
643 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
644 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
645 static int sdebug_no_uld;
646 static int sdebug_num_parts = DEF_NUM_PARTS;
647 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
648 static int sdebug_opt_blks = DEF_OPT_BLKS;
649 static int sdebug_opts = DEF_OPTS;
650 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
651 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
652 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
653 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
654 static int sdebug_sector_size = DEF_SECTOR_SIZE;
655 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
656 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
657 static unsigned int sdebug_lbpu = DEF_LBPU;
658 static unsigned int sdebug_lbpws = DEF_LBPWS;
659 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
660 static unsigned int sdebug_lbprz = DEF_LBPRZ;
661 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
662 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
663 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
664 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
665 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
666 static int sdebug_uuid_ctl = DEF_UUID_CTL;
667 static bool sdebug_removable = DEF_REMOVABLE;
668 static bool sdebug_clustering;
669 static bool sdebug_host_lock = DEF_HOST_LOCK;
670 static bool sdebug_strict = DEF_STRICT;
671 static bool sdebug_any_injecting_opt;
672 static bool sdebug_verbose;
673 static bool have_dif_prot;
674 static bool write_since_sync;
675 static bool sdebug_statistics = DEF_STATISTICS;
676 
677 static unsigned int sdebug_store_sectors;
678 static sector_t sdebug_capacity;	/* in sectors */
679 
680 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
681    may still need them */
682 static int sdebug_heads;		/* heads per disk */
683 static int sdebug_cylinders_per;	/* cylinders per surface */
684 static int sdebug_sectors_per;		/* sectors per cylinder */
685 
686 static LIST_HEAD(sdebug_host_list);
687 static DEFINE_SPINLOCK(sdebug_host_list_lock);
688 
689 static unsigned char *fake_storep;	/* ramdisk storage */
690 static struct t10_pi_tuple *dif_storep;	/* protection info */
691 static void *map_storep;		/* provisioning map */
692 
693 static unsigned long map_size;
694 static int num_aborts;
695 static int num_dev_resets;
696 static int num_target_resets;
697 static int num_bus_resets;
698 static int num_host_resets;
699 static int dix_writes;
700 static int dix_reads;
701 static int dif_errors;
702 
703 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
704 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
705 
706 static DEFINE_RWLOCK(atomic_rw);
707 
708 static char sdebug_proc_name[] = MY_NAME;
709 static const char *my_name = MY_NAME;
710 
711 static struct bus_type pseudo_lld_bus;
712 
713 static struct device_driver sdebug_driverfs_driver = {
714 	.name 		= sdebug_proc_name,
715 	.bus		= &pseudo_lld_bus,
716 };
717 
718 static const int check_condition_result =
719 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
720 
721 static const int illegal_condition_result =
722 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
723 
724 static const int device_qfull_result =
725 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
726 
727 
728 /* Only do the extra work involved in logical block provisioning if one or
729  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
730  * real reads and writes (i.e. not skipping them for speed).
731  */
scsi_debug_lbp(void)732 static inline bool scsi_debug_lbp(void)
733 {
734 	return 0 == sdebug_fake_rw &&
735 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
736 }
737 
/* Map an LBA to its backing address in the ramdisk store.  The store may
 * be smaller than the advertised capacity, so the LBA wraps modulo
 * sdebug_store_sectors.
 */
static void *lba2fake_store(unsigned long long lba)
{
	/* do_div() divides lba in place and returns the remainder */
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
744 
/* Map a sector to its T10 PI tuple in the protection-info store, wrapping
 * modulo sdebug_store_sectors like lba2fake_store() does for data.
 */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	/* sector_div() divides sector in place and returns the remainder */
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
751 
sdebug_max_tgts_luns(void)752 static void sdebug_max_tgts_luns(void)
753 {
754 	struct sdebug_host_info *sdbg_host;
755 	struct Scsi_Host *hpnt;
756 
757 	spin_lock(&sdebug_host_list_lock);
758 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
759 		hpnt = sdbg_host->shost;
760 		if ((hpnt->this_id >= 0) &&
761 		    (sdebug_num_tgts > hpnt->this_id))
762 			hpnt->max_id = sdebug_num_tgts + 1;
763 		else
764 			hpnt->max_id = sdebug_num_tgts;
765 		/* sdebug_max_luns; */
766 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
767 	}
768 	spin_unlock(&sdebug_host_list_lock);
769 }
770 
/* Whether an invalid field was found in the data-out buffer or the CDB */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (SPC field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field (byte) pointer */
	if (sdebug_dsense) {
		/* append a sense-key specific descriptor (type 0x2, len 0x6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format SKS bytes */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
813 
mk_sense_buffer(struct scsi_cmnd * scp,int key,int asc,int asq)814 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
815 {
816 	unsigned char *sbuff;
817 
818 	sbuff = scp->sense_buffer;
819 	if (!sbuff) {
820 		sdev_printk(KERN_ERR, scp->device,
821 			    "%s: sense_buffer is NULL\n", __func__);
822 		return;
823 	}
824 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
825 
826 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
827 
828 	if (sdebug_verbose)
829 		sdev_printk(KERN_INFO, scp->device,
830 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
831 			    my_name, key, asc, asq);
832 }
833 
/* Convenience wrapper: ILLEGAL REQUEST with INVALID OPCODE asc */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
838 
/* ioctl entry point: nothing is actually supported, but when verbose we
 * log which ioctl was attempted before rejecting it.
 */
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (sdebug_verbose) {
		switch (cmd) {
		case 0x1261:
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
			break;
		case 0x5331:
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
			break;
		default:
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
			break;
		}
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
856 
config_cdb_len(struct scsi_device * sdev)857 static void config_cdb_len(struct scsi_device *sdev)
858 {
859 	switch (sdebug_cdb_len) {
860 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
861 		sdev->use_10_for_rw = false;
862 		sdev->use_16_for_rw = false;
863 		sdev->use_10_for_ms = false;
864 		break;
865 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
866 		sdev->use_10_for_rw = true;
867 		sdev->use_16_for_rw = false;
868 		sdev->use_10_for_ms = false;
869 		break;
870 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
871 		sdev->use_10_for_rw = true;
872 		sdev->use_16_for_rw = false;
873 		sdev->use_10_for_ms = true;
874 		break;
875 	case 16:
876 		sdev->use_10_for_rw = false;
877 		sdev->use_16_for_rw = true;
878 		sdev->use_10_for_ms = true;
879 		break;
880 	case 32: /* No knobs to suggest this so same as 16 for now */
881 		sdev->use_10_for_rw = false;
882 		sdev->use_16_for_rw = true;
883 		sdev->use_10_for_ms = true;
884 		break;
885 	default:
886 		pr_warn("unexpected cdb_len=%d, force to 10\n",
887 			sdebug_cdb_len);
888 		sdev->use_10_for_rw = true;
889 		sdev->use_16_for_rw = false;
890 		sdev->use_10_for_ms = false;
891 		sdebug_cdb_len = 10;
892 		break;
893 	}
894 }
895 
all_config_cdb_len(void)896 static void all_config_cdb_len(void)
897 {
898 	struct sdebug_host_info *sdbg_host;
899 	struct Scsi_Host *shost;
900 	struct scsi_device *sdev;
901 
902 	spin_lock(&sdebug_host_list_lock);
903 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
904 		shost = sdbg_host->shost;
905 		shost_for_each_device(sdev, shost) {
906 			config_cdb_len(sdev);
907 		}
908 	}
909 	spin_unlock(&sdebug_host_list_lock);
910 }
911 
clear_luns_changed_on_target(struct sdebug_dev_info * devip)912 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
913 {
914 	struct sdebug_host_info *sdhp;
915 	struct sdebug_dev_info *dp;
916 
917 	spin_lock(&sdebug_host_list_lock);
918 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
919 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
920 			if ((devip->sdbg_host == dp->sdbg_host) &&
921 			    (devip->target == dp->target))
922 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
923 		}
924 	}
925 	spin_unlock(&sdebug_host_list_lock);
926 }
927 
/* If a unit attention (UA) is pending on @devip, build the matching sense
 * data in @scp, clear that UA bit and return check_condition_result;
 * otherwise return 0.  Only the lowest-numbered pending UA is reported
 * per call.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, used only if verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1007 
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* nothing requested: not an error */
	/* command must have a data-in phase (or be bidirectional) */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	/* report how many requested bytes were NOT transferred */
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
1026 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset is beyond the request: nothing to do */
	/* command must have a data-in phase (or be bidirectional) */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* resid only shrinks: keep the smallest "not transferred" count seen */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
1052 
1053 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1054  * 'arr' or -1 if error.
1055  */
fetch_to_dev_buffer(struct scsi_cmnd * scp,unsigned char * arr,int arr_len)1056 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1057 			       int arr_len)
1058 {
1059 	if (!scsi_bufflen(scp))
1060 		return 0;
1061 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1062 		return -1;
1063 
1064 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1065 }
1066 
1067 
/* INQUIRY identification strings: fixed width, space padded per SPC */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1075 
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits: a faked T10 vendor-id designator, a logical unit designator
 * (UUID or NAA-3 depending on sdebug_uuid_ctl), relative port, target
 * port / port group / target device NAA-3 designators and a SCSI name
 * string.  Each designator is a 4-byte header followed by its payload.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* length of the first designator's payload */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1163 
/* canned payload for the Software interface identification VPD page */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);	/* bytes placed in arr */
}
1176 
/* Management network addresses VPD page.  Emits one descriptor per URL:
 * a 4-byte header followed by the NUL-terminated address padded to a
 * multiple of 4 bytes.  Returns the number of bytes placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const struct {
		unsigned char assoc_svc;	/* association + service type */
		const char *url;
	} net_addrs[] = {
		{ 0x1, "https://www.kernel.org/config" }, /* lu, storage config */
		{ 0x4, "http://www.kernel.org/log" },	  /* lu, logging */
	};
	int k, num = 0;

	for (k = 0; k < 2; ++k) {
		int olen = strlen(net_addrs[k].url);
		/* room for the NUL, rounded up to a 4-byte multiple */
		int plen = ((olen + 1 + 3) / 4) * 4;

		arr[num++] = net_addrs[k].assoc_svc;
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, net_addrs[k].url, olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1211 
/* SCSI ports VPD page: two relative ports, each with an NAA-3 target
 * port identifier derived from target_dev_id.  Returns bytes placed in
 * arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1253 
1254 
/* canned ATA IDENTIFY-style payload for the ATA Information VPD page */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);	/* bytes placed in arr */
}
1305 
1306 
/* template for the Block limits VPD page; inquiry_vpd_b0() overwrites
 * most fields from the sdebug_* module parameters */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1313 
1314 /* Block limits VPD page (SBC-3) */
inquiry_vpd_b0(unsigned char * arr)1315 static int inquiry_vpd_b0(unsigned char *arr)
1316 {
1317 	unsigned int gran;
1318 
1319 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1320 
1321 	/* Optimal transfer length granularity */
1322 	if (sdebug_opt_xferlen_exp != 0 &&
1323 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1324 		gran = 1 << sdebug_opt_xferlen_exp;
1325 	else
1326 		gran = 1 << sdebug_physblk_exp;
1327 	put_unaligned_be16(gran, arr + 2);
1328 
1329 	/* Maximum Transfer Length */
1330 	if (sdebug_store_sectors > 0x400)
1331 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1332 
1333 	/* Optimal Transfer Length */
1334 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1335 
1336 	if (sdebug_lbpu) {
1337 		/* Maximum Unmap LBA Count */
1338 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1339 
1340 		/* Maximum Unmap Block Descriptor Count */
1341 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1342 	}
1343 
1344 	/* Unmap Granularity Alignment */
1345 	if (sdebug_unmap_alignment) {
1346 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1347 		arr[28] |= 0x80; /* UGAVALID */
1348 	}
1349 
1350 	/* Optimal Unmap Granularity */
1351 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1352 
1353 	/* Maximum WRITE SAME Length */
1354 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1355 
1356 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1357 
1358 	return sizeof(vpdb0_data);
1359 }
1360 
/* Block device characteristics VPD page (SBC-3): advertise a small,
 * non-rotating (solid state) medium.  Returns the 0x3c payload length.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* medium rotation rate of 1 means non-rotating (e.g. solid state) */
	arr[1] = 1;
	/* nominal form factor code 5: less than 1.8 inches */
	arr[3] = 5;

	return 0x3c;
}
1372 
1373 /* Logical block provisioning VPD page (SBC-4) */
inquiry_vpd_b2(unsigned char * arr)1374 static int inquiry_vpd_b2(unsigned char *arr)
1375 {
1376 	memset(arr, 0, 0x4);
1377 	arr[0] = 0;			/* threshold exponent */
1378 	if (sdebug_lbpu)
1379 		arr[1] = 1 << 7;
1380 	if (sdebug_lbpws)
1381 		arr[1] |= 1 << 6;
1382 	if (sdebug_lbpws10)
1383 		arr[1] |= 1 << 5;
1384 	if (sdebug_lbprz && scsi_debug_lbp())
1385 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1386 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1387 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1388 	/* threshold_percentage=0 */
1389 	return 0x4;
1390 }
1391 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to INQUIRY: either standard inquiry data or, when the EVPD bit
 * is set, one of the supported Vital Product Data pages.  Returns 0, a
 * host-byte error, or check_condition_result.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		/* obsolete in SPC; reject it */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 2-byte page length */
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to both the allocation length and our buffer size */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1545 
/* Informational exceptions control mode page [0x1c].  resp_requests()
 * checks byte 2 bit 2 (TEST) and byte 3 low nibble (MRIE) == 6. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1548 
resp_requests(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1549 static int resp_requests(struct scsi_cmnd *scp,
1550 			 struct sdebug_dev_info *devip)
1551 {
1552 	unsigned char *sbuff;
1553 	unsigned char *cmd = scp->cmnd;
1554 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1555 	bool dsense;
1556 	int len = 18;
1557 
1558 	memset(arr, 0, sizeof(arr));
1559 	dsense = !!(cmd[1] & 1);
1560 	sbuff = scp->sense_buffer;
1561 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1562 		if (dsense) {
1563 			arr[0] = 0x72;
1564 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1565 			arr[2] = THRESHOLD_EXCEEDED;
1566 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1567 			len = 8;
1568 		} else {
1569 			arr[0] = 0x70;
1570 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1571 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1572 			arr[12] = THRESHOLD_EXCEEDED;
1573 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1574 		}
1575 	} else {
1576 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1577 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1578 			;	/* have sense and formats match */
1579 		else if (arr[0] <= 0x70) {
1580 			if (dsense) {
1581 				memset(arr, 0, 8);
1582 				arr[0] = 0x72;
1583 				len = 8;
1584 			} else {
1585 				memset(arr, 0, 18);
1586 				arr[0] = 0x70;
1587 				arr[7] = 0xa;
1588 			}
1589 		} else if (dsense) {
1590 			memset(arr, 0, 8);
1591 			arr[0] = 0x72;
1592 			arr[1] = sbuff[2];     /* sense key */
1593 			arr[2] = sbuff[12];    /* asc */
1594 			arr[3] = sbuff[13];    /* ascq */
1595 			len = 8;
1596 		} else {
1597 			memset(arr, 0, 18);
1598 			arr[0] = 0x70;
1599 			arr[2] = sbuff[1];
1600 			arr[7] = 0xa;
1601 			arr[12] = sbuff[1];
1602 			arr[13] = sbuff[3];
1603 		}
1604 
1605 	}
1606 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1607 	return fill_from_dev_buffer(scp, arr, len);
1608 }
1609 
/* Respond to START STOP UNIT.  Only power condition 0 is accepted; the
 * START bit toggles devip->stopped.  Returns SDEG_RES_IMMED_MASK when no
 * delayed response is needed (state unchanged, or IMMED requested).
 */
static int resp_start_stop(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		/* nonzero POWER CONDITION field is not supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);	/* START bit clear means stop */
	changing = atomic_read(&devip->stopped) == !stop;
	atomic_xchg(&devip->stopped, stop);
	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1630 
get_sdebug_capacity(void)1631 static sector_t get_sdebug_capacity(void)
1632 {
1633 	static const unsigned int gibibyte = 1073741824;
1634 
1635 	if (sdebug_virtual_gb > 0)
1636 		return (sector_t)sdebug_virtual_gb *
1637 			(gibibyte / sdebug_sector_size);
1638 	else
1639 		return sdebug_store_sectors;
1640 }
1641 
1642 #define SDEBUG_READCAP_ARR_SZ 8
resp_readcap(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1643 static int resp_readcap(struct scsi_cmnd *scp,
1644 			struct sdebug_dev_info *devip)
1645 {
1646 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1647 	unsigned int capac;
1648 
1649 	/* following just in case virtual_gb changed */
1650 	sdebug_capacity = get_sdebug_capacity();
1651 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1652 	if (sdebug_capacity < 0xffffffff) {
1653 		capac = (unsigned int)sdebug_capacity - 1;
1654 		put_unaligned_be32(capac, arr + 0);
1655 	} else
1656 		put_unaligned_be32(0xffffffff, arr + 0);
1657 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1658 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1659 }
1660 
1661 #define SDEBUG_READCAP16_ARR_SZ 32
resp_readcap16(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1662 static int resp_readcap16(struct scsi_cmnd *scp,
1663 			  struct sdebug_dev_info *devip)
1664 {
1665 	unsigned char *cmd = scp->cmnd;
1666 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1667 	int alloc_len;
1668 
1669 	alloc_len = get_unaligned_be32(cmd + 10);
1670 	/* following just in case virtual_gb changed */
1671 	sdebug_capacity = get_sdebug_capacity();
1672 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1673 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1674 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1675 	arr[13] = sdebug_physblk_exp & 0xf;
1676 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1677 
1678 	if (scsi_debug_lbp()) {
1679 		arr[14] |= 0x80; /* LBPME */
1680 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1681 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1682 		 * in the wider field maps to 0 in this field.
1683 		 */
1684 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1685 			arr[14] |= 0x40;
1686 	}
1687 
1688 	arr[15] = sdebug_lowest_aligned & 0xff;
1689 
1690 	if (have_dif_prot) {
1691 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1692 		arr[12] |= 1; /* PROT_EN */
1693 	}
1694 
1695 	return fill_from_dev_buffer(scp, arr,
1696 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1697 }
1698 
1699 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1700 
resp_report_tgtpgs(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1701 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1702 			      struct sdebug_dev_info *devip)
1703 {
1704 	unsigned char *cmd = scp->cmnd;
1705 	unsigned char *arr;
1706 	int host_no = devip->sdbg_host->shost->host_no;
1707 	int n, ret, alen, rlen;
1708 	int port_group_a, port_group_b, port_a, port_b;
1709 
1710 	alen = get_unaligned_be32(cmd + 6);
1711 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1712 	if (! arr)
1713 		return DID_REQUEUE << 16;
1714 	/*
1715 	 * EVPD page 0x88 states we have two ports, one
1716 	 * real and a fake port with no device connected.
1717 	 * So we create two port groups with one port each
1718 	 * and set the group with port B to unavailable.
1719 	 */
1720 	port_a = 0x1; /* relative port A */
1721 	port_b = 0x2; /* relative port B */
1722 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1723 			(devip->channel & 0x7f);
1724 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1725 			(devip->channel & 0x7f) + 0x80;
1726 
1727 	/*
1728 	 * The asymmetric access state is cycled according to the host_id.
1729 	 */
1730 	n = 4;
1731 	if (sdebug_vpd_use_hostno == 0) {
1732 		arr[n++] = host_no % 3; /* Asymm access state */
1733 		arr[n++] = 0x0F; /* claim: all states are supported */
1734 	} else {
1735 		arr[n++] = 0x0; /* Active/Optimized path */
1736 		arr[n++] = 0x01; /* only support active/optimized paths */
1737 	}
1738 	put_unaligned_be16(port_group_a, arr + n);
1739 	n += 2;
1740 	arr[n++] = 0;    /* Reserved */
1741 	arr[n++] = 0;    /* Status code */
1742 	arr[n++] = 0;    /* Vendor unique */
1743 	arr[n++] = 0x1;  /* One port per group */
1744 	arr[n++] = 0;    /* Reserved */
1745 	arr[n++] = 0;    /* Reserved */
1746 	put_unaligned_be16(port_a, arr + n);
1747 	n += 2;
1748 	arr[n++] = 3;    /* Port unavailable */
1749 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1750 	put_unaligned_be16(port_group_b, arr + n);
1751 	n += 2;
1752 	arr[n++] = 0;    /* Reserved */
1753 	arr[n++] = 0;    /* Status code */
1754 	arr[n++] = 0;    /* Vendor unique */
1755 	arr[n++] = 0x1;  /* One port per group */
1756 	arr[n++] = 0;    /* Reserved */
1757 	arr[n++] = 0;    /* Reserved */
1758 	put_unaligned_be16(port_b, arr + n);
1759 	n += 2;
1760 
1761 	rlen = n - 4;
1762 	put_unaligned_be32(rlen, arr + 0);
1763 
1764 	/*
1765 	 * Return the smallest value of either
1766 	 * - The allocated length
1767 	 * - The constructed command length
1768 	 * - The maximum array size
1769 	 */
1770 	rlen = min(alen,n);
1771 	ret = fill_from_dev_buffer(scp, arr,
1772 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1773 	kfree(arr);
1774 	return ret;
1775 }
1776 
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	/* REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0x0c).
	 * reporting_opts 0 lists every supported opcode; 1-3 describe a
	 * single opcode (optionally with a service action). RCTD adds a
	 * 12-byte command timeouts descriptor to each entry. */
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts data */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap the working buffer at 8 KiB regardless of alloc_len */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* small responses get a fixed 320-byte buffer; larger ones get
	 * 64 bytes of headroom over a_len */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* walk the attached (same-opcode) variants, then
			 * restore oip so the outer loop can advance */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				/* opcode-only query is invalid for commands
				 * that require a service action */
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			/* supp==3: supported per standard; supp==1: not */
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached variants by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached variants by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the usage bit-mask (CDB template) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to the working buffer, then to the caller's alloc_len */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1927 
resp_rsup_tmfs(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1928 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1929 			  struct sdebug_dev_info *devip)
1930 {
1931 	bool repd;
1932 	u32 alloc_len, len;
1933 	u8 arr[16];
1934 	u8 *cmd = scp->cmnd;
1935 
1936 	memset(arr, 0, sizeof(arr));
1937 	repd = !!(cmd[2] & 0x80);
1938 	alloc_len = get_unaligned_be32(cmd + 6);
1939 	if (alloc_len < 4) {
1940 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1941 		return check_condition_result;
1942 	}
1943 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1944 	arr[1] = 0x1;		/* ITNRS */
1945 	if (repd) {
1946 		arr[3] = 0xc;
1947 		len = 16;
1948 	} else
1949 		len = 4;
1950 
1951 	len = (len < alloc_len) ? len : alloc_len;
1952 	return fill_from_dev_buffer(scp, arr, len);
1953 }
1954 
1955 /* <<Following mode page info copied from ST318451LW>> */
1956 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x01) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	/* pcontrol==1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1967 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x02) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	/* pcontrol==1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1978 
resp_format_pg(unsigned char * p,int pcontrol,int target)1979 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1980 {       /* Format device page for mode_sense */
1981 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1982 				     0, 0, 0, 0, 0, 0, 0, 0,
1983 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1984 
1985 	memcpy(p, format_pg, sizeof(format_pg));
1986 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1987 	put_unaligned_be16(sdebug_sector_size, p + 12);
1988 	if (sdebug_removable)
1989 		p[20] |= 0x20; /* should agree with INQUIRY */
1990 	if (1 == pcontrol)
1991 		memset(p + 2, 0, sizeof(format_pg) - 2);
1992 	return sizeof(format_pg);
1993 }
1994 
/* Caching mode page (0x08) current values. Mutable: resp_caching_pg() may
 * clear WCE when SDEBUG_OPT_N_WCE is set, and resp_mode_select() writes
 * the body of this page directly from MODE SELECT parameter data. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1998 
resp_caching_pg(unsigned char * p,int pcontrol,int target)1999 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2000 { 	/* Caching page for mode_sense */
2001 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2002 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2003 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2004 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2005 
2006 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2007 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2008 	memcpy(p, caching_pg, sizeof(caching_pg));
2009 	if (1 == pcontrol)
2010 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2011 	else if (2 == pcontrol)
2012 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2013 	return sizeof(caching_pg);
2014 }
2015 
/* Control mode page (0x0a) current values. Mutable: resp_ctrl_m_pg()
 * refreshes the D_SENSE and ATO bits from module state, and
 * resp_mode_select() writes the page body from MODE SELECT data. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2018 
resp_ctrl_m_pg(unsigned char * p,int pcontrol,int target)2019 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2020 { 	/* Control mode page for mode_sense */
2021 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2022 					0, 0, 0, 0};
2023 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2024 				     0, 0, 0x2, 0x4b};
2025 
2026 	if (sdebug_dsense)
2027 		ctrl_m_pg[2] |= 0x4;
2028 	else
2029 		ctrl_m_pg[2] &= ~0x4;
2030 
2031 	if (sdebug_ato)
2032 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2033 
2034 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2035 	if (1 == pcontrol)
2036 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2037 	else if (2 == pcontrol)
2038 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2039 	return sizeof(ctrl_m_pg);
2040 }
2041 
2042 
resp_iec_m_pg(unsigned char * p,int pcontrol,int target)2043 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2044 {	/* Informational Exceptions control mode page for mode_sense */
2045 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2046 				       0, 0, 0x0, 0x0};
2047 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2048 				      0, 0, 0x0, 0x0};
2049 
2050 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2051 	if (1 == pcontrol)
2052 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2053 	else if (2 == pcontrol)
2054 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2055 	return sizeof(iec_m_pg);
2056 }
2057 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page (0x19), short format, for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	/* pcontrol==1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2068 
2069 
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page (0x19, subpage 0x1) for
	 * mode_sense: two 48-byte phy descriptors after an 8-byte header.
	 * Offsets below are positional into this template. */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch the SAS device/attached addresses into both descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* phy identifiers derived from the target's synthetic device id */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	/* pcontrol==1 requests changeable values: none are changeable */
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2102 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	/* pcontrol==1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2114 
2115 #define SDEBUG_MAX_MSENSE_SZ 256
2116 
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	/* MODE SENSE (6 or 10): build mode parameter header, optional block
	 * descriptor(s), then the requested mode page(s) into arr[].
	 * 'offset' accumulates the total response length as pages are
	 * appended via the resp_*_pg() helpers. */
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;	/* append pointer into arr[] */
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	/* LLBAA (16-byte block descriptor) exists only in the 10-byte CDB */
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	/* synthetic per-target id, also used by the SAS phy subpage */
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* emit the block descriptor: number of blocks + block length */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* only page 0x19 has subpages here (besides 0x0 and 0xff) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* mode data length field excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2274 
2275 #define SDEBUG_MAX_MSELECT_SZ 512
2276 
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	/* MODE SELECT (6 or 10): accept a single mode page and copy its
	 * body into the corresponding writable page (caching, control or
	 * IEC). All other pages are rejected. */
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* the MODE DATA LENGTH field is reserved in MODE SELECT and must
	 * be zero, so any non-trivial value is a parameter-list error */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the header and any block descriptors to the first page */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* only accept a page whose length matches the stored template */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* D_SENSE bit feeds back into module state */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	/* other initiators must see a MODE PARAMETERS CHANGED unit attn */
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2354 
static int resp_temp_l_pg(unsigned char *arr)
{
	/* Temperature log page (0x0d) payload: current temperature 38C,
	 * reference temperature 65C. */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2364 
resp_ie_l_pg(unsigned char * arr)2365 static int resp_ie_l_pg(unsigned char *arr)
2366 {
2367 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2368 		};
2369 
2370 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2371 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2372 		arr[4] = THRESHOLD_EXCEEDED;
2373 		arr[5] = 0xff;
2374 	}
2375 	return sizeof(ie_l_pg);
2376 }
2377 
2378 #define SDEBUG_MAX_LSENSE_SZ 512
2379 
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	/* LOG SENSE: supports the supported-pages (0x0), temperature (0xd)
	 * and informational-exceptions (0x2f) log pages, each with the
	 * plain (subpage 0) and supported-subpages (0xff) variants. */
	int ppc, sp, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: unsupported */
	sp = cmd[1] & 0x1;	/* save parameters: unsupported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* clamp to the actual page length (arr+2 BE16 plus 4-byte header)
	 * and the caller's allocation length.
	 * NOTE(review): the final clamp uses SDEBUG_MAX_INQ_ARR_SZ rather
	 * than SDEBUG_MAX_LSENSE_SZ (the size of arr); harmless while the
	 * built pages stay small, but looks like a copy-paste — confirm. */
	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
2457 
check_device_access_params(struct scsi_cmnd * scp,unsigned long long lba,unsigned int num)2458 static int check_device_access_params(struct scsi_cmnd *scp,
2459 				      unsigned long long lba, unsigned int num)
2460 {
2461 	if (lba + num > sdebug_capacity) {
2462 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2463 		return check_condition_result;
2464 	}
2465 	/* transfer length excessive (tie in to block limits VPD page) */
2466 	if (num > sdebug_store_sectors) {
2467 		/* needs work to find which cdb byte 'num' comes from */
2468 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2469 		return check_condition_result;
2470 	}
2471 	return 0;
2472 }
2473 
2474 /* Returns number of bytes copied or -1 if error. */
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	/* Copy 'num' sectors between the command's scatter-gather list and
	 * the fake backing store, starting at 'lba', in the direction given
	 * by do_write. The store is treated as circular: an access past
	 * sdebug_store_sectors wraps to the start. */
	int ret;
	u64 block, rest = 0;	/* rest = sectors that wrap past the end */
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* reject a direction mismatch unless the command is bidirectional */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* block = lba modulo the store size (do_div also truncates lba) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	/* first (possibly only) segment: up to the end of the store */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short copy: report what was done */

	if (rest) {
		/* wrapped segment: continue from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2516 
2517 /* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
2518  * arr into lba2fake_store(lba,num) and return true. If comparison fails then
2519  * return false. */
comp_write_worker(u64 lba,u32 num,const u8 * arr)2520 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2521 {
2522 	bool res;
2523 	u64 block, rest = 0;
2524 	u32 store_blks = sdebug_store_sectors;
2525 	u32 lb_size = sdebug_sector_size;
2526 
2527 	block = do_div(lba, store_blks);
2528 	if (block + num > store_blks)
2529 		rest = block + num - store_blks;
2530 
2531 	res = !memcmp(fake_storep + (block * lb_size), arr,
2532 		      (num - rest) * lb_size);
2533 	if (!res)
2534 		return res;
2535 	if (rest)
2536 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2537 			     rest * lb_size);
2538 	if (!res)
2539 		return res;
2540 	arr += num * lb_size;
2541 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2542 	if (rest)
2543 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2544 		       rest * lb_size);
2545 	return res;
2546 }
2547 
dif_compute_csum(const void * buf,int len)2548 static __be16 dif_compute_csum(const void *buf, int len)
2549 {
2550 	__be16 csum;
2551 
2552 	if (sdebug_guard)
2553 		csum = (__force __be16)ip_compute_csum(buf, len);
2554 	else
2555 		csum = cpu_to_be16(crc_t10dif(buf, len));
2556 
2557 	return csum;
2558 }
2559 
/*
 * Verify one protection-information tuple (sdt) against one block of data.
 * Returns 0 on success, 0x01 on a guard (checksum) mismatch, or 0x03 on a
 * reference-tag mismatch; callers feed the non-zero value into the sense
 * data of the resulting check condition.
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the sector LBA */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2586 
/*
 * Copy protection information between the command's protection sglist and
 * dif_storep: read==true copies PI out to the sglist (READ direction),
 * read==false copies PI in from the sglist (WRITE direction).  Handles
 * wrap-around at the end of the fake DIF store.
 */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;	/* bytes wrapping past store end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped: continue at the start of dif_storep */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2629 
/* Check the stored protection info for each of 'sectors' blocks starting
 * at start_sec, then copy the PI out to the command's protection sglist.
 * Returns 0 on success, else the non-zero dif_verify() result. */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	sector_t sec = start_sec;
	unsigned int k;

	for (k = 0; k < sectors; k++, sec++, ei_lba++) {
		struct t10_pi_tuple *pi = dif_store(sec);
		int rc;

		/* an app tag of 0xffff means "do not check this block" */
		if (pi->app_tag == cpu_to_be16(0xffff))
			continue;

		rc = dif_verify(pi, lba2fake_store(sec), sec, ei_lba);
		if (rc) {
			dif_errors++;
			return rc;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2658 
/*
 * Respond to the READ family of commands (READ(6/10/12/16/32) and the read
 * half of XDWRITEREAD(10)): decode the cdb, validate the LBA range, apply
 * optional fault injection, then copy data out of the fake store under the
 * atomic_rw read lock.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and transfer length per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): a transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT (cmd[1] bits 5-7) must be 0 for DIF type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optionally simulate a medium error within a configured window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and transferred bytes */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* optional error injection after the data was transferred */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2799 
/*
 * Log a hex/ASCII dump of buf (len bytes) at KERN_ERR level, 16 bytes per
 * line.  Used to show the offending data block when a DIF check fails.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/* bound j by len as well, so a partial final line does not
		 * read past the end of buf */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i+j];

			/* printable ASCII is 0x20..0x7e inclusive ('~') */
			if (c >= 0x20 && c < 0x7f)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2821 
/*
 * Verify the protection information accompanying a write: walk the data-out
 * and protection sglists in lock step, checking each t10_pi_tuple against
 * its data block.  On success the PI is copied into dif_storep.  Returns 0
 * on success, else the non-zero dif_verify() result.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset into current protection page */
	int dpage_offset;	/* offset into current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;	/* report as guard check failure */
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* only consume the data bytes actually checked this pass */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: persist the PI into the fake DIF store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2893 
/* Map an LBA to its cell index in the provisioning bitmap (map_storep).
 * When an unmap alignment is configured, LBAs are shifted so cell
 * boundaries fall on alignment + n*granularity.  Note that sector_div()
 * divides lba in place and returns the remainder. */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
2901 
map_index_to_lba(unsigned long index)2902 static sector_t map_index_to_lba(unsigned long index)
2903 {
2904 	sector_t lba = index * sdebug_unmap_granularity;
2905 
2906 	if (sdebug_unmap_alignment)
2907 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2908 	return lba;
2909 }
2910 
/*
 * Return whether the block at 'lba' is currently mapped (provisioned).
 * *num is set to the count of consecutive blocks from lba sharing that
 * same mapped/unmapped state, clamped to the end of the fake store.
 */
static unsigned int map_state(sector_t lba, unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, map_storep);

	/* find where the current run of same-state bitmap cells ends */
	if (mapped)
		next = find_next_zero_bit(map_storep, map_size, index);
	else
		next = find_next_bit(map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
2930 
/* Mark every provisioning-bitmap cell overlapping [lba, lba+len) mapped. */
static void map_region(sector_t lba, unsigned int len)
{
	const sector_t end = lba + len;
	unsigned long idx;

	/* step cell by cell: jump to the first LBA of the next cell */
	for (; lba < end; lba = map_index_to_lba(idx + 1)) {
		idx = lba_to_map_index(lba);
		if (idx < map_size)
			set_bit(idx, map_storep);
	}
}
2944 
/*
 * Clear provisioning-bitmap cells wholly contained in [lba, lba+len) and,
 * per sdebug_lbprz, overwrite the backing data (zeroes when LBPRZ bit 0 is
 * set, else 0xff bytes).  Cells only partially covered remain mapped.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap cells that lie entirely inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* invalidate PI for the unmapped blocks */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2972 
/*
 * Respond to the WRITE family of commands (WRITE(6/10/12/16/32) and the
 * write half of XDWRITEREAD(10)): decode the cdb, validate the LBA range,
 * then copy data into the fake store under the atomic_rw write lock,
 * updating the provisioning map when logical block provisioning is on.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and transfer length per cdb variant */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): a transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* WRPROTECT (cmd[1] bits 5-7) must be 0 for DIF type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);	/* record blocks as provisioned */
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection after the data was transferred */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3093 
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 *
 * The data-out buffer starts with 'lbdof' blocks holding a parameter list
 * header plus 'num_lrd' LBA range descriptors; the write data for the
 * ranges follows, packed in descriptor order.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of the descriptor blocks */
	u8 *up;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	unsigned long iflags;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header + descriptors must fit within the descriptor blocks */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock_irqsave(&atomic_rw, iflags);
	sg_off = lbdof_blen;	/* write data starts after the descriptors */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* running block count must not exceed the cdb's total */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(scp, sg_off, lba, num, true);
		if (unlikely(scsi_debug_lbp()))
			map_region(lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional error injection per descriptor */
		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock_irqrestore(&atomic_rw, iflags);
err_out:
	kfree(lrdp);
	return ret;
}
3260 
/*
 * Common WRITE SAME worker: write one logical block's worth of data (or
 * zeroes when ndob is set) to 'num' blocks starting at lba, or unmap the
 * range when 'unmap' is set and logical block provisioning is enabled.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	int ret;
	unsigned long iflags;
	unsigned long long i;
	u32 lb_size = sdebug_sector_size;
	u64 block, lbaa;
	u8 *fs1p;	/* points at the first written block in the store */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fs1p = fake_storep + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(lba, num);	/* written blocks become mapped */
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3312 
resp_write_same_10(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3313 static int resp_write_same_10(struct scsi_cmnd *scp,
3314 			      struct sdebug_dev_info *devip)
3315 {
3316 	u8 *cmd = scp->cmnd;
3317 	u32 lba;
3318 	u16 num;
3319 	u32 ei_lba = 0;
3320 	bool unmap = false;
3321 
3322 	if (cmd[1] & 0x8) {
3323 		if (sdebug_lbpws10 == 0) {
3324 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3325 			return check_condition_result;
3326 		} else
3327 			unmap = true;
3328 	}
3329 	lba = get_unaligned_be32(cmd + 2);
3330 	num = get_unaligned_be16(cmd + 7);
3331 	if (num > sdebug_write_same_length) {
3332 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3333 		return check_condition_result;
3334 	}
3335 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3336 }
3337 
resp_write_same_16(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3338 static int resp_write_same_16(struct scsi_cmnd *scp,
3339 			      struct sdebug_dev_info *devip)
3340 {
3341 	u8 *cmd = scp->cmnd;
3342 	u64 lba;
3343 	u32 num;
3344 	u32 ei_lba = 0;
3345 	bool unmap = false;
3346 	bool ndob = false;
3347 
3348 	if (cmd[1] & 0x8) {	/* UNMAP */
3349 		if (sdebug_lbpws == 0) {
3350 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3351 			return check_condition_result;
3352 		} else
3353 			unmap = true;
3354 	}
3355 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3356 		ndob = true;
3357 	lba = get_unaligned_be64(cmd + 2);
3358 	num = get_unaligned_be32(cmd + 10);
3359 	if (num > sdebug_write_same_length) {
3360 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3361 		return check_condition_result;
3362 	}
3363 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3364 }
3365 
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
/* WRITE BUFFER: no data is stored; the microcode-download modes only raise
 * the appropriate unit attention(s) on this LU or its target siblings. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3414 
/*
 * COMPARE AND WRITE (opcode 0x89): the data-out buffer carries 2*num
 * blocks: the first num are compared against the store at lba and, only
 * if all compare equal, the second num blocks are written there.  On a
 * miscompare, MISCOMPARE sense is returned and the store is unchanged.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;	/* total blocks in data-out: compare + write halves */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);	/* written blocks become mapped */
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3490 
/* Wire format of one UNMAP block descriptor (see SBC-3): big-endian
 * starting LBA, big-endian number of blocks, then 4 reserved bytes.
 * Descriptors follow the 8 byte UNMAP parameter list header. */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3496 
resp_unmap(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3497 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3498 {
3499 	unsigned char *buf;
3500 	struct unmap_block_desc *desc;
3501 	unsigned int i, payload_len, descriptors;
3502 	int ret;
3503 	unsigned long iflags;
3504 
3505 
3506 	if (!scsi_debug_lbp())
3507 		return 0;	/* fib and say its done */
3508 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3509 	BUG_ON(scsi_bufflen(scp) != payload_len);
3510 
3511 	descriptors = (payload_len - 8) / 16;
3512 	if (descriptors > sdebug_unmap_max_desc) {
3513 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3514 		return check_condition_result;
3515 	}
3516 
3517 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3518 	if (!buf) {
3519 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3520 				INSUFF_RES_ASCQ);
3521 		return check_condition_result;
3522 	}
3523 
3524 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3525 
3526 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3527 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3528 
3529 	desc = (void *)&buf[8];
3530 
3531 	write_lock_irqsave(&atomic_rw, iflags);
3532 
3533 	for (i = 0 ; i < descriptors ; i++) {
3534 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3535 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3536 
3537 		ret = check_device_access_params(scp, lba, num);
3538 		if (ret)
3539 			goto out;
3540 
3541 		unmap_region(lba, num);
3542 	}
3543 
3544 	ret = 0;
3545 
3546 out:
3547 	write_unlock_irqrestore(&atomic_rw, iflags);
3548 	kfree(buf);
3549 
3550 	return ret;
3551 }
3552 
3553 #define SDEBUG_GET_LBA_STATUS_LEN 32
3554 
resp_get_lba_status(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3555 static int resp_get_lba_status(struct scsi_cmnd *scp,
3556 			       struct sdebug_dev_info *devip)
3557 {
3558 	u8 *cmd = scp->cmnd;
3559 	u64 lba;
3560 	u32 alloc_len, mapped, num;
3561 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3562 	int ret;
3563 
3564 	lba = get_unaligned_be64(cmd + 2);
3565 	alloc_len = get_unaligned_be32(cmd + 10);
3566 
3567 	if (alloc_len < 24)
3568 		return 0;
3569 
3570 	ret = check_device_access_params(scp, lba, 1);
3571 	if (ret)
3572 		return ret;
3573 
3574 	if (scsi_debug_lbp())
3575 		mapped = map_state(lba, &num);
3576 	else {
3577 		mapped = 1;
3578 		/* following just in case virtual_gb changed */
3579 		sdebug_capacity = get_sdebug_capacity();
3580 		if (sdebug_capacity - lba <= 0xffffffff)
3581 			num = sdebug_capacity - lba;
3582 		else
3583 			num = 0xffffffff;
3584 	}
3585 
3586 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3587 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3588 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3589 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3590 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3591 
3592 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3593 }
3594 
resp_sync_cache(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3595 static int resp_sync_cache(struct scsi_cmnd *scp,
3596 			   struct sdebug_dev_info *devip)
3597 {
3598 	int res = 0;
3599 	u64 lba;
3600 	u32 num_blocks;
3601 	u8 *cmd = scp->cmnd;
3602 
3603 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3604 		lba = get_unaligned_be32(cmd + 2);
3605 		num_blocks = get_unaligned_be16(cmd + 7);
3606 	} else {				/* SYNCHRONIZE_CACHE(16) */
3607 		lba = get_unaligned_be64(cmd + 2);
3608 		num_blocks = get_unaligned_be32(cmd + 10);
3609 	}
3610 	if (lba + num_blocks > sdebug_capacity) {
3611 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3612 		return check_condition_result;
3613 	}
3614 	if (!write_since_sync || cmd[1] & 0x2)
3615 		res = SDEG_RES_IMMED_MASK;
3616 	else		/* delay if write_since_sync and IMMED clear */
3617 		write_since_sync = false;
3618 	return res;
3619 }
3620 
3621 #define RL_BUCKET_ELEMS 8
3622 
3623 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3624  * (W-LUN), the normal Linux scanning logic does not associate it with a
3625  * device (e.g. /dev/sg7). The following magic will make that association:
3626  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3627  * where <n> is a host number. If there are multiple targets in a host then
3628  * the above will associate a W-LUN to each target. To only get a W-LUN
3629  * for target 2, then use "echo '- 2 49409' > scan" .
3630  */
/* Respond to REPORT LUNS.  Builds the response in RL_BUCKET_ELEMS-sized
 * buckets (arr) and streams each full bucket to the data-in buffer via
 * p_fill_from_dev_buffer(); the final, partial bucket (which may also
 * carry the W-LUN) is sent after the loop.  Returns 0 on success or a
 * check-condition / resid error code.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC-4 requires at least 4 bytes for the response header */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* LUN 0 is suppressed when the no_lun_0 module option is set */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* bucket not filled: last (partial) bucket, exit loop with
		 * lun_p/j pointing at the next free slot in arr */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the REPORT LUNS well-known LUN to the final bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3721 
/* Worker for XDWRITEREAD(10): XORs the data-out payload into the
 * data-in scatter-gather list, byte by byte, in place.  Returns 0 on
 * success or check_condition_result if the bounce buffer cannot be
 * allocated.  NOTE(review): lba/num/devip are currently unused here;
 * the read/write themselves are done by the caller beforehand.
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* linearize the data-out payload so it can be walked with offset */
	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	/* atomic mapping: kzalloc above already used GFP_ATOMIC */
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	/* XOR each mapped data-in chunk with the matching data-out bytes */
	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3757 
resp_xdwriteread_10(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3758 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3759 			       struct sdebug_dev_info *devip)
3760 {
3761 	u8 *cmd = scp->cmnd;
3762 	u64 lba;
3763 	u32 num;
3764 	int errsts;
3765 
3766 	if (!scsi_bidi_cmnd(scp)) {
3767 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3768 				INSUFF_RES_ASCQ);
3769 		return check_condition_result;
3770 	}
3771 	errsts = resp_read_dt0(scp, devip);
3772 	if (errsts)
3773 		return errsts;
3774 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3775 		errsts = resp_write_dt0(scp, devip);
3776 		if (errsts)
3777 			return errsts;
3778 	}
3779 	lba = get_unaligned_be32(cmd + 2);
3780 	num = get_unaligned_be16(cmd + 7);
3781 	return resp_xdwriteread(scp, lba, num, devip);
3782 }
3783 
get_queue(struct scsi_cmnd * cmnd)3784 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3785 {
3786 	u32 tag = blk_mq_unique_tag(cmnd->request);
3787 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3788 
3789 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3790 	if (WARN_ON_ONCE(hwq >= submit_queues))
3791 		hwq = 0;
3792 	return sdebug_q_arr + hwq;
3793 }
3794 
/* Queued (deferred) command completions converge here: both the hrtimer
 * and the work-queue deferral paths call this function.  It validates
 * and releases the queued-command slot, adjusts a shrinking max_queue
 * if the user reduced it, and finally invokes the mid level's
 * scsi_done() callback — unless the command was flagged aborted, in
 * which case completion is deliberately skipped. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions landing on a different CPU than issue */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* validate the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means the user shrank max_queue and
	 * slots above the new limit are still draining */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no slot at/above the new limit is busy, clear the
		 * retirement marker; otherwise lower it to the highest
		 * still-busy slot + 1 */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
3868 
3869 /* When high resolution timer goes off this function is called. */
sdebug_q_cmd_hrt_complete(struct hrtimer * timer)3870 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3871 {
3872 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3873 						  hrt);
3874 	sdebug_q_cmd_complete(sd_dp);
3875 	return HRTIMER_NORESTART;
3876 }
3877 
3878 /* When work queue schedules work, it calls this function. */
sdebug_q_cmd_wq_complete(struct work_struct * work)3879 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3880 {
3881 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3882 						  ew.work);
3883 	sdebug_q_cmd_complete(sd_dp);
3884 }
3885 
/* Shared LU name used when sdebug_uuid_ctl == 2: generated once on
 * first device creation, then copied to every later device. */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3888 
/* Allocate and initialise one pseudo device, link it onto the host's
 * device list and return it (NULL on allocation failure).  The LU name
 * (uuid) policy follows the sdebug_uuid_ctl module parameter.
 */
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (!devip)
		return NULL;

	switch (sdebug_uuid_ctl) {
	case 1:		/* unique uuid per LU */
		uuid_gen(&devip->lu_name);
		break;
	case 2:		/* one uuid shared by all LUs */
		if (!got_shared_uuid) {
			uuid_gen(&shared_uuid);
			got_shared_uuid = true;
		}
		devip->lu_name = shared_uuid;
		break;
	}

	devip->sdbg_host = sdbg_host;
	list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	return devip;
}
3912 
find_build_dev_info(struct scsi_device * sdev)3913 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3914 {
3915 	struct sdebug_host_info *sdbg_host;
3916 	struct sdebug_dev_info *open_devip = NULL;
3917 	struct sdebug_dev_info *devip;
3918 
3919 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3920 	if (!sdbg_host) {
3921 		pr_err("Host info NULL\n");
3922 		return NULL;
3923 	}
3924 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3925 		if ((devip->used) && (devip->channel == sdev->channel) &&
3926 		    (devip->target == sdev->id) &&
3927 		    (devip->lun == sdev->lun))
3928 			return devip;
3929 		else {
3930 			if ((!devip->used) && (!open_devip))
3931 				open_devip = devip;
3932 		}
3933 	}
3934 	if (!open_devip) { /* try and make a new one */
3935 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3936 		if (!open_devip) {
3937 			pr_err("out of memory at line %d\n", __LINE__);
3938 			return NULL;
3939 		}
3940 	}
3941 
3942 	open_devip->channel = sdev->channel;
3943 	open_devip->target = sdev->id;
3944 	open_devip->lun = sdev->lun;
3945 	open_devip->sdbg_host = sdbg_host;
3946 	atomic_set(&open_devip->num_in_q, 0);
3947 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3948 	open_devip->used = true;
3949 	return open_devip;
3950 }
3951 
scsi_debug_slave_alloc(struct scsi_device * sdp)3952 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3953 {
3954 	if (sdebug_verbose)
3955 		pr_info("slave_alloc <%u %u %u %llu>\n",
3956 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3957 	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
3958 	return 0;
3959 }
3960 
scsi_debug_slave_configure(struct scsi_device * sdp)3961 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3962 {
3963 	struct sdebug_dev_info *devip =
3964 			(struct sdebug_dev_info *)sdp->hostdata;
3965 
3966 	if (sdebug_verbose)
3967 		pr_info("slave_configure <%u %u %u %llu>\n",
3968 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3969 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3970 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3971 	if (devip == NULL) {
3972 		devip = find_build_dev_info(sdp);
3973 		if (devip == NULL)
3974 			return 1;  /* no resources, will be marked offline */
3975 	}
3976 	sdp->hostdata = devip;
3977 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3978 	if (sdebug_no_uld)
3979 		sdp->no_uld_attach = 1;
3980 	config_cdb_len(sdp);
3981 	return 0;
3982 }
3983 
scsi_debug_slave_destroy(struct scsi_device * sdp)3984 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3985 {
3986 	struct sdebug_dev_info *devip =
3987 		(struct sdebug_dev_info *)sdp->hostdata;
3988 
3989 	if (sdebug_verbose)
3990 		pr_info("slave_destroy <%u %u %u %llu>\n",
3991 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3992 	if (devip) {
3993 		/* make this slot available for re-use */
3994 		devip->used = false;
3995 		sdp->hostdata = NULL;
3996 	}
3997 }
3998 
stop_qc_helper(struct sdebug_defer * sd_dp,enum sdeb_defer_type defer_t)3999 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4000 			   enum sdeb_defer_type defer_t)
4001 {
4002 	if (!sd_dp)
4003 		return;
4004 	if (defer_t == SDEB_DEFER_HRT)
4005 		hrtimer_cancel(&sd_dp->hrt);
4006 	else if (defer_t == SDEB_DEFER_WQ)
4007 		cancel_work_sync(&sd_dp->ew.work);
4008 }
4009 
/* If @cmnd is found on a submit queue: releases its slot, cancels its
 * timer or work item and returns true; else returns false.  The queue
 * lock is dropped before stop_qc_helper() because cancel_work_sync()/
 * hrtimer_cancel() may sleep or wait for the completion path, which
 * takes the same lock. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* scan up to the retired limit too if max_queue shrank */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the defer type under the lock,
				 * then neutralize it so the completion path
				 * (if racing) backs off */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop lock: stop_qc_helper() may sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4055 
/* Deletes (stops) timers or work queues of all queued commands.  Like
 * stop_queued_cmnd() the per-queue lock is dropped around
 * stop_qc_helper() (which may sleep) and re-taken to continue the
 * scan. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot and neutralize the defer type
				 * under the lock (see stop_queued_cmnd) */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop lock: stop_qc_helper() may sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4094 
4095 /* Free queued command memory on heap */
free_all_queued(void)4096 static void free_all_queued(void)
4097 {
4098 	int j, k;
4099 	struct sdebug_queue *sqp;
4100 	struct sdebug_queued_cmd *sqcp;
4101 
4102 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4103 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4104 			sqcp = &sqp->qc_arr[k];
4105 			kfree(sqcp->sd_dp);
4106 			sqcp->sd_dp = NULL;
4107 		}
4108 	}
4109 }
4110 
scsi_debug_abort(struct scsi_cmnd * SCpnt)4111 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4112 {
4113 	bool ok;
4114 
4115 	++num_aborts;
4116 	if (SCpnt) {
4117 		ok = stop_queued_cmnd(SCpnt);
4118 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4119 			sdev_printk(KERN_INFO, SCpnt->device,
4120 				    "%s: command%s found\n", __func__,
4121 				    ok ? "" : " not");
4122 	}
4123 	return SUCCESS;
4124 }
4125 
scsi_debug_device_reset(struct scsi_cmnd * SCpnt)4126 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4127 {
4128 	++num_dev_resets;
4129 	if (SCpnt && SCpnt->device) {
4130 		struct scsi_device *sdp = SCpnt->device;
4131 		struct sdebug_dev_info *devip =
4132 				(struct sdebug_dev_info *)sdp->hostdata;
4133 
4134 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4135 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4136 		if (devip)
4137 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4138 	}
4139 	return SUCCESS;
4140 }
4141 
scsi_debug_target_reset(struct scsi_cmnd * SCpnt)4142 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4143 {
4144 	struct sdebug_host_info *sdbg_host;
4145 	struct sdebug_dev_info *devip;
4146 	struct scsi_device *sdp;
4147 	struct Scsi_Host *hp;
4148 	int k = 0;
4149 
4150 	++num_target_resets;
4151 	if (!SCpnt)
4152 		goto lie;
4153 	sdp = SCpnt->device;
4154 	if (!sdp)
4155 		goto lie;
4156 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4157 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4158 	hp = sdp->host;
4159 	if (!hp)
4160 		goto lie;
4161 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4162 	if (sdbg_host) {
4163 		list_for_each_entry(devip,
4164 				    &sdbg_host->dev_info_list,
4165 				    dev_list)
4166 			if (devip->target == sdp->id) {
4167 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4168 				++k;
4169 			}
4170 	}
4171 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4172 		sdev_printk(KERN_INFO, sdp,
4173 			    "%s: %d device(s) found in target\n", __func__, k);
4174 lie:
4175 	return SUCCESS;
4176 }
4177 
scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)4178 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4179 {
4180 	struct sdebug_host_info *sdbg_host;
4181 	struct sdebug_dev_info *devip;
4182 	struct scsi_device *sdp;
4183 	struct Scsi_Host *hp;
4184 	int k = 0;
4185 
4186 	++num_bus_resets;
4187 	if (!(SCpnt && SCpnt->device))
4188 		goto lie;
4189 	sdp = SCpnt->device;
4190 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4191 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4192 	hp = sdp->host;
4193 	if (hp) {
4194 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4195 		if (sdbg_host) {
4196 			list_for_each_entry(devip,
4197 					    &sdbg_host->dev_info_list,
4198 					    dev_list) {
4199 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4200 				++k;
4201 			}
4202 		}
4203 	}
4204 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4205 		sdev_printk(KERN_INFO, sdp,
4206 			    "%s: %d device(s) found in host\n", __func__, k);
4207 lie:
4208 	return SUCCESS;
4209 }
4210 
scsi_debug_host_reset(struct scsi_cmnd * SCpnt)4211 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4212 {
4213 	struct sdebug_host_info *sdbg_host;
4214 	struct sdebug_dev_info *devip;
4215 	int k = 0;
4216 
4217 	++num_host_resets;
4218 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4219 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4220 	spin_lock(&sdebug_host_list_lock);
4221 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4222 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4223 				    dev_list) {
4224 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4225 			++k;
4226 		}
4227 	}
4228 	spin_unlock(&sdebug_host_list_lock);
4229 	stop_all_queued();
4230 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4231 		sdev_printk(KERN_INFO, SCpnt->device,
4232 			    "%s: %d device(s) found\n", __func__, k);
4233 	return SUCCESS;
4234 }
4235 
/* Write a classic DOS/MBR partition table into the first sector of the
 * ram disk image (ramp): up to sdebug_num_parts primary partitions of
 * roughly equal size, each aligned to a cylinder boundary and typed as
 * plain Linux (0x83).  Skipped for stores smaller than 1 MiB. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];	/* +1 end marker, +1 zero terminator */
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the MBR itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors/cylinder */
	starts[0] = sdebug_sectors_per;
	/* round each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* terminates the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);	/* MBR partition entries */
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* legacy CHS address of the first sector (1-based sector) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* legacy CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start and length, little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4285 
/* Set or clear the 'blocked' flag on every submit queue, pausing or
 * resuming acceptance of new commands. */
static void block_unblock_all_queues(bool block)
{
	int j;

	for (j = 0; j < submit_queues; ++j)
		atomic_set(&sdebug_q_arr[j].blocked, (int)block);
}
4294 
4295 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4296  * commands will be processed normally before triggers occur.
4297  */
tweak_cmnd_count(void)4298 static void tweak_cmnd_count(void)
4299 {
4300 	int count, modulo;
4301 
4302 	modulo = abs(sdebug_every_nth);
4303 	if (modulo < 2)
4304 		return;
4305 	block_unblock_all_queues(true);
4306 	count = atomic_read(&sdebug_cmnd_count);
4307 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4308 	block_unblock_all_queues(false);
4309 }
4310 
clear_queue_stats(void)4311 static void clear_queue_stats(void)
4312 {
4313 	atomic_set(&sdebug_cmnd_count, 0);
4314 	atomic_set(&sdebug_completions, 0);
4315 	atomic_set(&sdebug_miss_cpus, 0);
4316 	atomic_set(&sdebug_a_tsf, 0);
4317 }
4318 
setup_inject(struct sdebug_queue * sqp,struct sdebug_queued_cmd * sqcp)4319 static void setup_inject(struct sdebug_queue *sqp,
4320 			 struct sdebug_queued_cmd *sqcp)
4321 {
4322 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4323 		if (sdebug_every_nth > 0)
4324 			sqcp->inj_recovered = sqcp->inj_transport
4325 				= sqcp->inj_dif
4326 				= sqcp->inj_dix = sqcp->inj_short
4327 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4328 		return;
4329 	}
4330 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4331 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4332 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4333 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4334 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4335 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4336 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4337 }
4338 
4339 /* Complete the processing of the thread that queued a SCSI command to this
4340  * driver. It either completes the command by calling cmnd_done() or
4341  * schedules a hr timer or work queue then returns 0. Returns
4342  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4343  */
schedule_resp(struct scsi_cmnd * cmnd,struct sdebug_dev_info * devip,int scsi_result,int (* pfp)(struct scsi_cmnd *,struct sdebug_dev_info *),int delta_jiff,int ndelay)4344 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4345 			 int scsi_result,
4346 			 int (*pfp)(struct scsi_cmnd *,
4347 				    struct sdebug_dev_info *),
4348 			 int delta_jiff, int ndelay)
4349 {
4350 	unsigned long iflags;
4351 	int k, num_in_q, qdepth, inject;
4352 	struct sdebug_queue *sqp;
4353 	struct sdebug_queued_cmd *sqcp;
4354 	struct scsi_device *sdp;
4355 	struct sdebug_defer *sd_dp;
4356 
4357 	if (unlikely(devip == NULL)) {
4358 		if (scsi_result == 0)
4359 			scsi_result = DID_NO_CONNECT << 16;
4360 		goto respond_in_thread;
4361 	}
4362 	sdp = cmnd->device;
4363 
4364 	if (delta_jiff == 0)
4365 		goto respond_in_thread;
4366 
4367 	/* schedule the response at a later time if resources permit */
4368 	sqp = get_queue(cmnd);
4369 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4370 	if (unlikely(atomic_read(&sqp->blocked))) {
4371 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4372 		return SCSI_MLQUEUE_HOST_BUSY;
4373 	}
4374 	num_in_q = atomic_read(&devip->num_in_q);
4375 	qdepth = cmnd->device->queue_depth;
4376 	inject = 0;
4377 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4378 		if (scsi_result) {
4379 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4380 			goto respond_in_thread;
4381 		} else
4382 			scsi_result = device_qfull_result;
4383 	} else if (unlikely(sdebug_every_nth &&
4384 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4385 			    (scsi_result == 0))) {
4386 		if ((num_in_q == (qdepth - 1)) &&
4387 		    (atomic_inc_return(&sdebug_a_tsf) >=
4388 		     abs(sdebug_every_nth))) {
4389 			atomic_set(&sdebug_a_tsf, 0);
4390 			inject = 1;
4391 			scsi_result = device_qfull_result;
4392 		}
4393 	}
4394 
4395 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4396 	if (unlikely(k >= sdebug_max_queue)) {
4397 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4398 		if (scsi_result)
4399 			goto respond_in_thread;
4400 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4401 			scsi_result = device_qfull_result;
4402 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4403 			sdev_printk(KERN_INFO, sdp,
4404 				    "%s: max_queue=%d exceeded, %s\n",
4405 				    __func__, sdebug_max_queue,
4406 				    (scsi_result ?  "status: TASK SET FULL" :
4407 						    "report: host busy"));
4408 		if (scsi_result)
4409 			goto respond_in_thread;
4410 		else
4411 			return SCSI_MLQUEUE_HOST_BUSY;
4412 	}
4413 	__set_bit(k, sqp->in_use_bm);
4414 	atomic_inc(&devip->num_in_q);
4415 	sqcp = &sqp->qc_arr[k];
4416 	sqcp->a_cmnd = cmnd;
4417 	cmnd->host_scribble = (unsigned char *)sqcp;
4418 	sd_dp = sqcp->sd_dp;
4419 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4420 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4421 		setup_inject(sqp, sqcp);
4422 	if (sd_dp == NULL) {
4423 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4424 		if (sd_dp == NULL)
4425 			return SCSI_MLQUEUE_HOST_BUSY;
4426 	}
4427 
4428 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4429 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
4430 		/*
4431 		 * This is the F_DELAY_OVERR case. No delay.
4432 		 */
4433 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
4434 		delta_jiff = ndelay = 0;
4435 	}
4436 	if (cmnd->result == 0 && scsi_result != 0)
4437 		cmnd->result = scsi_result;
4438 
4439 	if (unlikely(sdebug_verbose && cmnd->result))
4440 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4441 			    __func__, cmnd->result);
4442 
4443 	if (delta_jiff > 0 || ndelay > 0) {
4444 		ktime_t kt;
4445 
4446 		if (delta_jiff > 0) {
4447 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4448 		} else
4449 			kt = ndelay;
4450 		if (!sd_dp->init_hrt) {
4451 			sd_dp->init_hrt = true;
4452 			sqcp->sd_dp = sd_dp;
4453 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4454 				     HRTIMER_MODE_REL_PINNED);
4455 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4456 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4457 			sd_dp->qc_idx = k;
4458 		}
4459 		if (sdebug_statistics)
4460 			sd_dp->issuing_cpu = raw_smp_processor_id();
4461 		sd_dp->defer_t = SDEB_DEFER_HRT;
4462 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4463 	} else {	/* jdelay < 0, use work queue */
4464 		if (!sd_dp->init_wq) {
4465 			sd_dp->init_wq = true;
4466 			sqcp->sd_dp = sd_dp;
4467 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4468 			sd_dp->qc_idx = k;
4469 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4470 		}
4471 		if (sdebug_statistics)
4472 			sd_dp->issuing_cpu = raw_smp_processor_id();
4473 		sd_dp->defer_t = SDEB_DEFER_WQ;
4474 		if (unlikely(sqcp->inj_cmd_abort))
4475 			sd_dp->aborted = true;
4476 		schedule_work(&sd_dp->ew.work);
4477 		if (unlikely(sqcp->inj_cmd_abort)) {
4478 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4479 				    cmnd->request->tag);
4480 			blk_abort_request(cmnd->request);
4481 		}
4482 	}
4483 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4484 		     (scsi_result == device_qfull_result)))
4485 		sdev_printk(KERN_INFO, sdp,
4486 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4487 			    num_in_q, (inject ? "<inject> " : ""),
4488 			    "status: TASK SET FULL");
4489 	return 0;
4490 
4491 respond_in_thread:	/* call back to mid-layer using invocation thread */
4492 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4493 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
4494 	if (cmnd->result == 0 && scsi_result != 0)
4495 		cmnd->result = scsi_result;
4496 	cmnd->scsi_done(cmnd);
4497 	return 0;
4498 }
4499 
4500 /* Note: The following macros create attribute files in the
4501    /sys/module/scsi_debug/parameters directory. Unfortunately this
4502    driver is unaware of a change and cannot trigger auxiliary actions
4503    as it can when the corresponding attribute in the
4504    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4505  */
/*
 * Module parameters. Each one shadows a driver attribute under
 * /sys/bus/pseudo/drivers/scsi_debug; as noted above, writes through
 * /sys/module/scsi_debug/parameters bypass the auxiliary actions that
 * the driver-attribute store functions perform.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
/* INQUIRY strings are fixed-size buffers, hence module_param_string */
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
4560 
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line descriptions of the module parameters, shown by modinfo(8). */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4618 
4619 #define SDEBUG_INFO_LEN 256
4620 static char sdebug_info[SDEBUG_INFO_LEN];
4621 
scsi_debug_info(struct Scsi_Host * shp)4622 static const char *scsi_debug_info(struct Scsi_Host *shp)
4623 {
4624 	int k;
4625 
4626 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4627 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4628 	if (k >= (SDEBUG_INFO_LEN - 1))
4629 		return sdebug_info;
4630 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4631 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4632 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4633 		  "statistics", (int)sdebug_statistics);
4634 	return sdebug_info;
4635 }
4636 
4637 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
scsi_debug_write_info(struct Scsi_Host * host,char * buffer,int length)4638 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4639 				 int length)
4640 {
4641 	char arr[16];
4642 	int opts;
4643 	int minLen = length > 15 ? 15 : length;
4644 
4645 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4646 		return -EACCES;
4647 	memcpy(arr, buffer, minLen);
4648 	arr[minLen] = '\0';
4649 	if (1 != sscanf(arr, "%d", &opts))
4650 		return -EINVAL;
4651 	sdebug_opts = opts;
4652 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4653 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4654 	if (sdebug_every_nth != 0)
4655 		tweak_cmnd_count();
4656 	return length;
4657 }
4658 
4659 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4660  * same for each scsi_debug host (if more than one). Some of the counters
4661  * output are not atomics so might be inaccurate in a busy system. */
/* /proc read callback: dump driver settings, counters and per-queue state.
 * Per the comment above, some counters are not atomics so values may be
 * slightly stale on a busy system.
 */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;	/* first bit, queue index, last bit */
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per submit queue: report the busy span of in_use_bm, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		/* find_first_bit() returns the size when no bit is set */
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4703 
delay_show(struct device_driver * ddp,char * buf)4704 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4705 {
4706 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4707 }
4708 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4709  * of delay is jiffies.
4710  */
delay_store(struct device_driver * ddp,const char * buf,size_t count)4711 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4712 			   size_t count)
4713 {
4714 	int jdelay, res;
4715 
4716 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4717 		res = count;
4718 		if (sdebug_jdelay != jdelay) {
4719 			int j, k;
4720 			struct sdebug_queue *sqp;
4721 
4722 			block_unblock_all_queues(true);
4723 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4724 			     ++j, ++sqp) {
4725 				k = find_first_bit(sqp->in_use_bm,
4726 						   sdebug_max_queue);
4727 				if (k != sdebug_max_queue) {
4728 					res = -EBUSY;   /* queued commands */
4729 					break;
4730 				}
4731 			}
4732 			if (res > 0) {
4733 				sdebug_jdelay = jdelay;
4734 				sdebug_ndelay = 0;
4735 			}
4736 			block_unblock_all_queues(false);
4737 		}
4738 		return res;
4739 	}
4740 	return -EINVAL;
4741 }
4742 static DRIVER_ATTR_RW(delay);
4743 
ndelay_show(struct device_driver * ddp,char * buf)4744 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4745 {
4746 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4747 }
4748 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4749 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
ndelay_store(struct device_driver * ddp,const char * buf,size_t count)4750 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4751 			    size_t count)
4752 {
4753 	int ndelay, res;
4754 
4755 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4756 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4757 		res = count;
4758 		if (sdebug_ndelay != ndelay) {
4759 			int j, k;
4760 			struct sdebug_queue *sqp;
4761 
4762 			block_unblock_all_queues(true);
4763 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4764 			     ++j, ++sqp) {
4765 				k = find_first_bit(sqp->in_use_bm,
4766 						   sdebug_max_queue);
4767 				if (k != sdebug_max_queue) {
4768 					res = -EBUSY;   /* queued commands */
4769 					break;
4770 				}
4771 			}
4772 			if (res > 0) {
4773 				sdebug_ndelay = ndelay;
4774 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4775 							: DEF_JDELAY;
4776 			}
4777 			block_unblock_all_queues(false);
4778 		}
4779 		return res;
4780 	}
4781 	return -EINVAL;
4782 }
4783 static DRIVER_ATTR_RW(ndelay);
4784 
opts_show(struct device_driver * ddp,char * buf)4785 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4786 {
4787 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4788 }
4789 
opts_store(struct device_driver * ddp,const char * buf,size_t count)4790 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4791 			  size_t count)
4792 {
4793 	int opts;
4794 	char work[20];
4795 
4796 	if (sscanf(buf, "%10s", work) == 1) {
4797 		if (strncasecmp(work, "0x", 2) == 0) {
4798 			if (kstrtoint(work + 2, 16, &opts) == 0)
4799 				goto opts_done;
4800 		} else {
4801 			if (kstrtoint(work, 10, &opts) == 0)
4802 				goto opts_done;
4803 		}
4804 	}
4805 	return -EINVAL;
4806 opts_done:
4807 	sdebug_opts = opts;
4808 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4809 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4810 	tweak_cmnd_count();
4811 	return count;
4812 }
4813 static DRIVER_ATTR_RW(opts);
4814 
ptype_show(struct device_driver * ddp,char * buf)4815 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4816 {
4817 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4818 }
ptype_store(struct device_driver * ddp,const char * buf,size_t count)4819 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4820 			   size_t count)
4821 {
4822 	int n;
4823 
4824 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4825 		sdebug_ptype = n;
4826 		return count;
4827 	}
4828 	return -EINVAL;
4829 }
4830 static DRIVER_ATTR_RW(ptype);
4831 
dsense_show(struct device_driver * ddp,char * buf)4832 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4833 {
4834 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4835 }
dsense_store(struct device_driver * ddp,const char * buf,size_t count)4836 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4837 			    size_t count)
4838 {
4839 	int n;
4840 
4841 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4842 		sdebug_dsense = n;
4843 		return count;
4844 	}
4845 	return -EINVAL;
4846 }
4847 static DRIVER_ATTR_RW(dsense);
4848 
fake_rw_show(struct device_driver * ddp,char * buf)4849 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4850 {
4851 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4852 }
fake_rw_store(struct device_driver * ddp,const char * buf,size_t count)4853 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4854 			     size_t count)
4855 {
4856 	int n;
4857 
4858 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4859 		n = (n > 0);
4860 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4861 		if (sdebug_fake_rw != n) {
4862 			if ((0 == n) && (NULL == fake_storep)) {
4863 				unsigned long sz =
4864 					(unsigned long)sdebug_dev_size_mb *
4865 					1048576;
4866 
4867 				fake_storep = vzalloc(sz);
4868 				if (NULL == fake_storep) {
4869 					pr_err("out of memory, 9\n");
4870 					return -ENOMEM;
4871 				}
4872 			}
4873 			sdebug_fake_rw = n;
4874 		}
4875 		return count;
4876 	}
4877 	return -EINVAL;
4878 }
4879 static DRIVER_ATTR_RW(fake_rw);
4880 
no_lun_0_show(struct device_driver * ddp,char * buf)4881 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4882 {
4883 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4884 }
no_lun_0_store(struct device_driver * ddp,const char * buf,size_t count)4885 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4886 			      size_t count)
4887 {
4888 	int n;
4889 
4890 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4891 		sdebug_no_lun_0 = n;
4892 		return count;
4893 	}
4894 	return -EINVAL;
4895 }
4896 static DRIVER_ATTR_RW(no_lun_0);
4897 
num_tgts_show(struct device_driver * ddp,char * buf)4898 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4899 {
4900 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4901 }
num_tgts_store(struct device_driver * ddp,const char * buf,size_t count)4902 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4903 			      size_t count)
4904 {
4905 	int n;
4906 
4907 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4908 		sdebug_num_tgts = n;
4909 		sdebug_max_tgts_luns();
4910 		return count;
4911 	}
4912 	return -EINVAL;
4913 }
4914 static DRIVER_ATTR_RW(num_tgts);
4915 
/* Read-only: size in MiB of the shared ram store (module param). */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

/* Read-only: number of partitions created on each device (module param). */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4927 
every_nth_show(struct device_driver * ddp,char * buf)4928 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4929 {
4930 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4931 }
every_nth_store(struct device_driver * ddp,const char * buf,size_t count)4932 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4933 			       size_t count)
4934 {
4935 	int nth;
4936 
4937 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4938 		sdebug_every_nth = nth;
4939 		if (nth && !sdebug_statistics) {
4940 			pr_info("every_nth needs statistics=1, set it\n");
4941 			sdebug_statistics = true;
4942 		}
4943 		tweak_cmnd_count();
4944 		return count;
4945 	}
4946 	return -EINVAL;
4947 }
4948 static DRIVER_ATTR_RW(every_nth);
4949 
max_luns_show(struct device_driver * ddp,char * buf)4950 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4951 {
4952 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4953 }
max_luns_store(struct device_driver * ddp,const char * buf,size_t count)4954 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4955 			      size_t count)
4956 {
4957 	int n;
4958 	bool changed;
4959 
4960 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4961 		if (n > 256) {
4962 			pr_warn("max_luns can be no more than 256\n");
4963 			return -EINVAL;
4964 		}
4965 		changed = (sdebug_max_luns != n);
4966 		sdebug_max_luns = n;
4967 		sdebug_max_tgts_luns();
4968 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4969 			struct sdebug_host_info *sdhp;
4970 			struct sdebug_dev_info *dp;
4971 
4972 			spin_lock(&sdebug_host_list_lock);
4973 			list_for_each_entry(sdhp, &sdebug_host_list,
4974 					    host_list) {
4975 				list_for_each_entry(dp, &sdhp->dev_info_list,
4976 						    dev_list) {
4977 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4978 						dp->uas_bm);
4979 				}
4980 			}
4981 			spin_unlock(&sdebug_host_list_lock);
4982 		}
4983 		return count;
4984 	}
4985 	return -EINVAL;
4986 }
4987 static DRIVER_ATTR_RW(max_luns);
4988 
/* Show the current cap on queued commands per queue. */
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;	/* n: new max; k: highest in-use slot seen */
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* find the highest in-use slot index across all queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* find_last_bit() yields SDEBUG_CANQUEUE when no bit set,
		 * i.e. no commands in flight -> nothing to retire */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* in-flight commands sit above the new cap; remember
			 * the old watermark so they can drain */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
5024 
/* Read-only: when set, upper-level drivers (e.g. sd) are kept away. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Read-only: simulated SCSI standard level (module param). */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
5036 
virtual_gb_show(struct device_driver * ddp,char * buf)5037 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
5038 {
5039 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
5040 }
virtual_gb_store(struct device_driver * ddp,const char * buf,size_t count)5041 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
5042 				size_t count)
5043 {
5044 	int n;
5045 	bool changed;
5046 
5047 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5048 		changed = (sdebug_virtual_gb != n);
5049 		sdebug_virtual_gb = n;
5050 		sdebug_capacity = get_sdebug_capacity();
5051 		if (changed) {
5052 			struct sdebug_host_info *sdhp;
5053 			struct sdebug_dev_info *dp;
5054 
5055 			spin_lock(&sdebug_host_list_lock);
5056 			list_for_each_entry(sdhp, &sdebug_host_list,
5057 					    host_list) {
5058 				list_for_each_entry(dp, &sdhp->dev_info_list,
5059 						    dev_list) {
5060 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
5061 						dp->uas_bm);
5062 				}
5063 			}
5064 			spin_unlock(&sdebug_host_list_lock);
5065 		}
5066 		return count;
5067 	}
5068 	return -EINVAL;
5069 }
5070 static DRIVER_ATTR_RW(virtual_gb);
5071 
add_host_show(struct device_driver * ddp,char * buf)5072 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5073 {
5074 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5075 }
5076 
5077 static int sdebug_add_adapter(void);
5078 static void sdebug_remove_adapter(void);
5079 
add_host_store(struct device_driver * ddp,const char * buf,size_t count)5080 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5081 			      size_t count)
5082 {
5083 	int delta_hosts;
5084 
5085 	if (sscanf(buf, "%d", &delta_hosts) != 1)
5086 		return -EINVAL;
5087 	if (delta_hosts > 0) {
5088 		do {
5089 			sdebug_add_adapter();
5090 		} while (--delta_hosts);
5091 	} else if (delta_hosts < 0) {
5092 		do {
5093 			sdebug_remove_adapter();
5094 		} while (++delta_hosts);
5095 	}
5096 	return count;
5097 }
5098 static DRIVER_ATTR_RW(add_host);
5099 
vpd_use_hostno_show(struct device_driver * ddp,char * buf)5100 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5101 {
5102 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5103 }
vpd_use_hostno_store(struct device_driver * ddp,const char * buf,size_t count)5104 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5105 				    size_t count)
5106 {
5107 	int n;
5108 
5109 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5110 		sdebug_vpd_use_hostno = n;
5111 		return count;
5112 	}
5113 	return -EINVAL;
5114 }
5115 static DRIVER_ATTR_RW(vpd_use_hostno);
5116 
statistics_show(struct device_driver * ddp,char * buf)5117 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5118 {
5119 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5120 }
statistics_store(struct device_driver * ddp,const char * buf,size_t count)5121 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5122 				size_t count)
5123 {
5124 	int n;
5125 
5126 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5127 		if (n > 0)
5128 			sdebug_statistics = true;
5129 		else {
5130 			clear_queue_stats();
5131 			sdebug_statistics = false;
5132 		}
5133 		return count;
5134 	}
5135 	return -EINVAL;
5136 }
5137 static DRIVER_ATTR_RW(statistics);
5138 
/* Read-only attributes mirroring module parameters fixed at load time. */

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
5174 
map_show(struct device_driver * ddp,char * buf)5175 static ssize_t map_show(struct device_driver *ddp, char *buf)
5176 {
5177 	ssize_t count;
5178 
5179 	if (!scsi_debug_lbp())
5180 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5181 				 sdebug_store_sectors);
5182 
5183 	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5184 			  (int)map_size, map_storep);
5185 	buf[count++] = '\n';
5186 	buf[count] = '\0';
5187 
5188 	return count;
5189 }
5190 static DRIVER_ATTR_RO(map);
5191 
removable_show(struct device_driver * ddp,char * buf)5192 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5193 {
5194 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5195 }
removable_store(struct device_driver * ddp,const char * buf,size_t count)5196 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5197 			       size_t count)
5198 {
5199 	int n;
5200 
5201 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5202 		sdebug_removable = (n > 0);
5203 		return count;
5204 	}
5205 	return -EINVAL;
5206 }
5207 static DRIVER_ATTR_RW(removable);
5208 
host_lock_show(struct device_driver * ddp,char * buf)5209 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5210 {
5211 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5212 }
5213 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
host_lock_store(struct device_driver * ddp,const char * buf,size_t count)5214 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5215 			       size_t count)
5216 {
5217 	int n;
5218 
5219 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5220 		sdebug_host_lock = (n > 0);
5221 		return count;
5222 	}
5223 	return -EINVAL;
5224 }
5225 static DRIVER_ATTR_RW(host_lock);
5226 
strict_show(struct device_driver * ddp,char * buf)5227 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5228 {
5229 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5230 }
strict_store(struct device_driver * ddp,const char * buf,size_t count)5231 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5232 			    size_t count)
5233 {
5234 	int n;
5235 
5236 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5237 		sdebug_strict = (n > 0);
5238 		return count;
5239 	}
5240 	return -EINVAL;
5241 }
5242 static DRIVER_ATTR_RW(strict);
5243 
uuid_ctl_show(struct device_driver * ddp,char * buf)5244 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5245 {
5246 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5247 }
5248 static DRIVER_ATTR_RO(uuid_ctl);
5249 
cdb_len_show(struct device_driver * ddp,char * buf)5250 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5251 {
5252 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5253 }
cdb_len_store(struct device_driver * ddp,const char * buf,size_t count)5254 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5255 			     size_t count)
5256 {
5257 	int ret, n;
5258 
5259 	ret = kstrtoint(buf, 0, &n);
5260 	if (ret)
5261 		return ret;
5262 	sdebug_cdb_len = n;
5263 	all_config_cdb_len();
5264 	return count;
5265 }
5266 static DRIVER_ATTR_RW(cdb_len);
5267 
5268 
5269 /* Note: The following array creates attribute files in the
5270    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5271    files (over those found in the /sys/module/scsi_debug/parameters
5272    directory) is that auxiliary actions can be triggered when an attribute
5273    is changed. For example see: sdebug_add_host_store() above.
5274  */
5275 
/* Driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug/ */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(sdebug_drv);
5311 
/* Root device parenting all simulated adapters; set in scsi_debug_init() */
static struct device *pseudo_primary;
5313 
/*
 * Module initialization: validate module parameters, allocate the
 * per-queue array and the ramdisk backing store (plus optional DIF and
 * provisioning-map metadata), register the pseudo root device, bus and
 * driver, then add the requested number of simulated hosts.
 *
 * Returns 0 on success or a negative errno; on failure, everything
 * allocated so far is released via the goto-cleanup chain at the end.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ndelay is in nanoseconds and must stay below one second;
	 * when set it overrides the jiffies-based delay (jdelay). */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* one sdebug_queue (with its own lock) per submission queue */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* fake_rw skips the data store entirely, so only allocate when
	 * real reads/writes against the ramdisk are wanted */
	if (sdebug_fake_rw == 0) {
		fake_storep = vzalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		/* NOTE(review): dif_size is int; a very large dev_size_mb
		 * could overflow here — confirm upper bound elsewhere */
		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		/* NOTE(review): informational message at pr_err level */
		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff = unwritten PI pattern */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* one bit per provisioning block */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(array_size(sizeof(long),
						BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_host is reused as a live counter: zero it, then let
	 * each successful sdebug_add_adapter() increment it */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5533 
/*
 * Module teardown: quiesce queued commands, remove every adapter added
 * during init (sdebug_add_host counts them), then unregister the
 * driver/bus/root device and release all backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;	/* adapters still registered */

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* vfree()/kfree() accept NULL, so unallocated stores are fine */
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}
5551 
5552 device_initcall(scsi_debug_init);
5553 module_exit(scsi_debug_exit);
5554 
/*
 * dev->release callback for simulated adapters: frees the
 * sdebug_host_info that embeds @dev, once its last reference drops.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5562 
/*
 * Create one simulated adapter: allocate its host structure, populate
 * its device-info list (one entry per target/LUN pair), link it onto
 * the global host list, and register it on the pseudo bus — which
 * triggers sdebug_driver_probe() to build the actual Scsi_Host.
 *
 * Returns 0 on success or a negative errno.
 */
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (sdbg_host == NULL) {
		pr_err("out of memory at line %d\n", __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		/* sdebug_device_create() links each entry onto
		 * sdbg_host->dev_info_list itself */
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			pr_err("out of memory at line %d\n", __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	/* name by current adapter count, e.g. "adapter0", "adapter1" */
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);

	error = device_register(&sdbg_host->dev);

	/* NOTE(review): if device_register() fails the host has already
	 * been linked onto sdebug_host_list and the device may hold a
	 * reference; jumping to clean kfree()s it without list_del() or
	 * put_device() — confirm against current driver-model rules */
	if (error)
		goto clean;

	++sdebug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}
5615 
sdebug_remove_adapter(void)5616 static void sdebug_remove_adapter(void)
5617 {
5618 	struct sdebug_host_info *sdbg_host = NULL;
5619 
5620 	spin_lock(&sdebug_host_list_lock);
5621 	if (!list_empty(&sdebug_host_list)) {
5622 		sdbg_host = list_entry(sdebug_host_list.prev,
5623 				       struct sdebug_host_info, host_list);
5624 		list_del(&sdbg_host->host_list);
5625 	}
5626 	spin_unlock(&sdebug_host_list_lock);
5627 
5628 	if (!sdbg_host)
5629 		return;
5630 
5631 	device_unregister(&sdbg_host->dev);
5632 	--sdebug_add_host;
5633 }
5634 
sdebug_change_qdepth(struct scsi_device * sdev,int qdepth)5635 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5636 {
5637 	int num_in_q = 0;
5638 	struct sdebug_dev_info *devip;
5639 
5640 	block_unblock_all_queues(true);
5641 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5642 	if (NULL == devip) {
5643 		block_unblock_all_queues(false);
5644 		return	-ENODEV;
5645 	}
5646 	num_in_q = atomic_read(&devip->num_in_q);
5647 
5648 	if (qdepth < 1)
5649 		qdepth = 1;
5650 	/* allow to exceed max host qc_arr elements for testing */
5651 	if (qdepth > SDEBUG_CANQUEUE + 10)
5652 		qdepth = SDEBUG_CANQUEUE + 10;
5653 	scsi_change_queue_depth(sdev, qdepth);
5654 
5655 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5656 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5657 			    __func__, qdepth, num_in_q);
5658 	}
5659 	block_unblock_all_queues(false);
5660 	return sdev->queue_depth;
5661 }
5662 
fake_timeout(struct scsi_cmnd * scp)5663 static bool fake_timeout(struct scsi_cmnd *scp)
5664 {
5665 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5666 		if (sdebug_every_nth < -1)
5667 			sdebug_every_nth = -1;
5668 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5669 			return true; /* ignore command causing timeout */
5670 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5671 			 scsi_medium_access_command(scp))
5672 			return true; /* time out reads and writes */
5673 	}
5674 	return false;
5675 }
5676 
fake_host_busy(struct scsi_cmnd * scp)5677 static bool fake_host_busy(struct scsi_cmnd *scp)
5678 {
5679 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5680 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5681 }
5682 
/*
 * queuecommand entry point: decode the CDB, look up its opcode_info_t
 * descriptor (resolving service actions where applicable), run the
 * optional strict length-mask check, honour pending unit attentions and
 * the stopped state, then schedule the matching resp_* handler with the
 * configured response delay. Error paths funnel into schedule_resp()
 * with either a CHECK CONDITION or DID_NO_CONNECT result.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the incoming CDB as hex bytes */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* out-of-range LUNs (other than the REPORT LUNS W-LUN) fail fast */
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action selects among the attached
			 * descriptors; its position depends on the CDB */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			/* no match: point the sense data at the field
			 * (service action byte) that failed to match */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* the REPORT LUNS W-LUN only accepts a small command subset */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest set (disallowed) bit for
				 * the sense-key field pointer */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* deliver any pending unit attention first (unless exempt) */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* medium-access commands fail with NOT READY while stopped */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
5843 
/* Host template for simulated adapters; can_queue and use_clustering
 * are patched per module parameters in sdebug_driver_probe() */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5870 
/*
 * Bus probe callback, invoked when a simulated adapter device is
 * registered: allocate a Scsi_Host from the (parameter-patched)
 * template, configure id/lun limits, protection (DIF/DIX) capabilities
 * and guard type, then add and scan the host.
 *
 * Returns 0 on success or -ENODEV on failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	/* patch the shared template from module parameters before use */
	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	if (shost_use_blk_mq(hpnt))
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room above this_id (the initiator) for all targets */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the dif/dix module parameters onto host capability flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5967 
sdebug_driver_remove(struct device * dev)5968 static int sdebug_driver_remove(struct device *dev)
5969 {
5970 	struct sdebug_host_info *sdbg_host;
5971 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5972 
5973 	sdbg_host = to_sdebug_host(dev);
5974 
5975 	if (!sdbg_host) {
5976 		pr_err("Unable to locate host info\n");
5977 		return -ENODEV;
5978 	}
5979 
5980 	scsi_remove_host(sdbg_host->shost);
5981 
5982 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5983 				 dev_list) {
5984 		list_del(&sdbg_devinfo->dev_list);
5985 		kfree(sdbg_devinfo);
5986 	}
5987 
5988 	scsi_host_put(sdbg_host->shost);
5989 	return 0;
5990 }
5991 
/* Bus match callback: every device on the pseudo bus matches every
 * driver, so the single scsi_debug driver binds to all adapters. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5997 
/* The "pseudo" bus that hosts all simulated adapters; probe/remove
 * build and tear down the corresponding Scsi_Host instances. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
6005