// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
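/*
 * Example usage (illustrative sketch; the module parameter names below are
 * the ones defined later in this file):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=4
 *
 * creates one pseudo SCSI host with one target carrying four LUNs that,
 * with the default per_host_store=false and fake_rw=0, all share a single
 * 256 MiB RAM backing store.
 */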


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
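/*
 * Example (illustrative): if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (4) are pending in a device's uas_bm bitmap,
 * make_ua() further below uses find_first_bit() and therefore reports the
 * power-on/reset unit attention first; the LUNs-changed UA is only
 * reported on a subsequent command.
 */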

/* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
 * is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
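/*
 * Worked example (illustrative): on a 64-bit build BITS_PER_LONG is 64,
 * so SDEBUG_CANQUEUE = 3 * 64 = 192 commands may be queued per submit
 * queue; on a 32-bit build the limit drops to 3 * 32 = 96.
 */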

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
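/*
 * Example (illustrative): the INQUIRY entry in opcode_info_arr below has
 * len_mask {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}. len_mask[0] says the
 * cdb is 6 bytes long; the 0xe3 mask for cdb[1] (binary 1110 0011) means
 * only bits 7..5, 1 and 0 may be set in that byte. With the strict
 * parameter enabled, any other set bit yields an INVALID FIELD IN CDB
 * sense.
 */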

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
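/*
 * Example (illustrative): the array above is indexed by the first cdb
 * byte, so an incoming READ(10) (opcode 0x28) maps to
 * opcode_ind_arr[0x28] == SDEB_I_READ, which in turn indexes
 * opcode_info_arr[] further below.
 */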

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
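/*
 * Sketch (illustrative, not the exact code used later in this file): a
 * response function that honors an IMMED bit in its cdb might end with
 *
 *	if (immed)
 *		return res | SDEG_RES_IMMED_MASK;
 *	return res;
 *
 * so the queuing code can complete the command without the usual
 * simulated delay.
 */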

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
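/*
 * Sketch of the lookup this layout implies (illustrative, not the exact
 * code used later in this file): after opcode_ind_arr[] picks a row in
 * opcode_info_arr[], a cdb whose opcode or service action differs from
 * that row is resolved by walking the row's overflow array:
 *
 *	const struct opcode_info_t *r_oip = oip->arrp;
 *	int k;
 *
 *	for (k = 0; k < oip->num_attached; ++k, ++r_oip) {
 *		if (cdb0 == r_oip->opcode && sa == r_oip->sa)
 *			break;
 *	}
 *
 * On a match, r_oip->pfp supplies the response handler for that variant.
 */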
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
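/*
 * Worked example (illustrative): with DRIVER_SENSE == 0x08 and
 * SAM_STAT_CHECK_CONDITION == 0x02, check_condition_result above packs to
 * (0x08 << 24) | 0x02 == 0x08000002; the SCSI mid-level later unpacks the
 * driver byte, host byte and SCSI status from that single int.
 */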


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
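/*
 * Worked example (illustrative): mk_sense_invalid_fld(scp, SDEB_IN_CDB,
 * 1, 4) builds an ILLEGAL REQUEST sense with asc 0x24 (INVALID FIELD IN
 * CDB) whose sense-key-specific bytes are 0xcc 0x00 0x01: 0x80 (SKSV) |
 * 0x40 (C/D) | 0x08 (BPV) | 4 == 0xcc, followed by the 16-bit byte
 * offset 1.
 */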

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
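/*
 * Example (illustrative): inquiry_vpd_83() below forms the logical unit's
 * NAA-3 identifier as naa3_comp_b + dev_id_num, so device number 3
 * reports 0x3333333000000003 in its device identification VPD page.
 */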

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
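
/*
 * Editor's note: a hedged, user-space sketch (not part of this driver) of
 * how an initiator might read back the fields that inquiry_vpd_b0() fills
 * in above. The get_be32() helper and the "page" pointer are hypothetical;
 * "page" is assumed to point at the byte this function calls arr[0], i.e.
 * byte 4 of the full Block Limits VPD page.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void dump_block_limits(const uint8_t *page)
{
	uint16_t gran = ((uint16_t)page[2] << 8) | page[3];

	printf("optimal transfer length granularity: %u blocks\n", gran);
	printf("maximum transfer length: %u blocks\n", get_be32(page + 4));
	printf("optimal transfer length: %u blocks\n", get_be32(page + 8));
	printf("maximum unmap LBA count: %u\n", get_be32(page + 16));
	printf("optimal unmap granularity: %u\n", get_be32(page + 24));
	printf("UGAVALID: %u\n", page[28] >> 7);
}
#endif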

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;	/* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
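
/*
 * Editor's note: the three LBP bits written above mirror the module's
 * lbpu/lbpws/lbpws10 parameters. A minimal sketch (hypothetical helper,
 * not part of this driver) of how a reader would decode byte 1 of the
 * page built by inquiry_vpd_b2():
 */
#if 0
static void decode_lbp_byte1(unsigned char b1)
{
	int lbpu = !!(b1 & 0x80);	/* UNMAP command supported */
	int lbpws = !!(b1 & 0x40);	/* WRITE SAME(16) with UNMAP bit */
	int lbpws10 = !!(b1 & 0x20);	/* WRITE SAME(10) with UNMAP bit */
	int lbprz = (b1 >> 2) & 0x7;	/* zeros-after-unmap policy, sbc4r07 */
}
#endif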

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;		/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {	/* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {	/* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
				(devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/* sanity */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0x80;	/* unit serial number */
			arr[n++] = 0x83;	/* device identification */
			arr[n++] = 0x84;	/* software interface ident. */
			arr[n++] = 0x85;	/* management network addresses */
			arr[n++] = 0x86;	/* extended inquiry */
			arr[n++] = 0x87;	/* mode page policy */
			arr[n++] = 0x88;	/* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/* sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;	/* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10;	/* claim: implicit TPGS */
	arr[6] = 0x10;	/* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa;	/* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);	/* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);	/* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
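
/*
 * Editor's note: both INQUIRY paths above clamp the transfer twice, once
 * to the CDB's ALLOCATION LENGTH and once to the local buffer size.
 * Worked example with illustrative numbers: alloc_len=36 against a 96
 * byte standard inquiry and a 584 byte buffer gives min(36, 96, 584) =
 * 36 bytes returned; the remainder of the response is silently dropped,
 * as SPC allows.
 */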

/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;	/* NOT_READY in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
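
/*
 * Editor's note: a hedged sketch (hypothetical helper, not part of this
 * driver) of a decoder for the two sense layouts emitted above: fixed
 * format (0x70) keeps the sense key at byte 2 with ASC/ASCQ at bytes
 * 12/13, descriptor format (0x72) keeps the key at byte 1 with ASC/ASCQ
 * at bytes 2/3.
 */
#if 0
static void decode_sense(const unsigned char *sb)
{
	unsigned char key, asc, ascq;

	if ((sb[0] & 0x7f) == 0x72) {	/* descriptor format */
		key = sb[1] & 0xf;
		asc = sb[2];
		ascq = sb[3];
	} else {			/* fixed format (0x70) */
		key = sb[2] & 0xf;
		asc = sb[12];
		ascq = sb[13];
	}
}
#endif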

static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}

static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);
	else
		return sdebug_store_sectors;
}
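
/*
 * Editor's note: worked example of the conversion above, assuming the
 * driver's default 512 byte sector size: virtual_gb=4 yields
 * 4 * (1073741824 / 512) = 4 * 2097152 = 8388608 sectors.
 */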

#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
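
/*
 * Editor's note: READ CAPACITY(10) carries the last LBA in a 32-bit
 * field, so resp_readcap() follows the SBC convention of reporting
 * 0xffffffff once the capacity no longer fits, which tells the initiator
 * to retry with READ CAPACITY(16), handled below.
 */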

#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	/*
	 * Since the scsi_debug READ CAPACITY implementation always reports the
	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
	 */
	if (devip->zmodel == BLK_ZONED_HM)
		arr[12] |= 1 << 4;

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}

#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1;	/* relative port A */
	port_b = 0x2;	/* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3;	/* Asymm access state */
		arr[n++] = 0x0F;	/* claim: all states are supported */
	} else {
		arr[n++] = 0x0;		/* Active/Optimized path */
		arr[n++] = 0x01;	/* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;	/* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}

static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
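
/*
 * Editor's note: a hedged sketch of a CDB that would exercise the
 * "one command" branch above. Values are illustrative only: MAINTENANCE
 * IN (0xa3) with service action 0x0c, REPORTING OPTIONS=1 (opcode only),
 * asking about READ(10):
 */
#if 0
static const unsigned char rsoc_cdb[12] = {
	0xa3,			/* MAINTENANCE IN */
	0x0c,			/* REPORT SUPPORTED OPERATION CODES */
	0x01,			/* RCTD=0, REPORTING OPTIONS=1 */
	0x28,			/* REQUESTED OPERATION CODE: READ(10) */
	0x00, 0x00,		/* REQUESTED SERVICE ACTION (unused) */
	0x00, 0x00, 0x02, 0x00,	/* ALLOCATION LENGTH = 512 */
	0x00, 0x00		/* reserved, CONTROL */
};
#endif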

static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
	arr[1] = 0x1;		/* ITNRS */
	if (repd) {
		arr[3] = 0xc;
		len = 16;
	} else
		len = 4;

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}

/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}

static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20;	/* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}

static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
					0xff, 0xff, 0xff, 0xff, 0x80, 0x14,
					0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
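
/*
 * Editor's note: the pcontrol convention shared by all the mode page
 * responders in this file: 0 returns current values, 1 a changeable
 * mask, 2 defaults; 3 (saved values) is rejected in resp_mode_sense()
 * with SAVING_PARAMS_UNSUP. For example, pcontrol=1 on the caching page
 * above returns 0x04 in byte 2, i.e. only the WCE bit may be changed
 * via MODE SELECT.
 */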

static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80;	/* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}


static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}

static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
				       0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}


static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}

static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}

#define SDEBUG_MAX_MSENSE_SZ 256

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
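
/*
 * Editor's note: worked length example for the two headers built above.
 * MODE SENSE(6) uses a 4 byte header whose one-byte MODE DATA LENGTH
 * excludes itself (hence offset - 1); MODE SENSE(10) uses an 8 byte
 * header whose two-byte length excludes both length bytes (hence
 * offset - 2). So an 8 byte block descriptor plus the 12 byte control
 * mode page gives offset = 4 + 8 + 12 = 24 and arr[0] = 23 in the
 * 6-byte variant.
 */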

#define SDEBUG_MAX_MSELECT_SZ 512

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:	/* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:	/* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}

static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}

static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
				   };

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}

#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}

static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}

static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	return &devip->zstate[lba >> devip->zsize_shift];
}
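
/*
 * Editor's note: zbc_zone() depends on every zone having the same
 * power-of-two size, so lookup is a single shift. Worked example with a
 * hypothetical 256 MiB zone on 512 byte sectors: z_size = 524288
 * sectors, zsize_shift = 19, so LBA 1048576 falls in
 * zstate[1048576 >> 19] = zstate[2].
 */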

static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}

static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}

static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
			return;
		}
	}
}

static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}

static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
		break;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		break;
	default:
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);
		break;
	}
	zsp->z_cond = ZC5_FULL;
}

static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}

static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}

static inline int check_device_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}

/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}

/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
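
/*
 * Editor's note: do_device_access() lets the virtual LBA space wrap
 * around a smaller backing store. Worked example with a hypothetical
 * 0x400-sector store: lba=0x3fe, num=4 gives block=0x3fe and rest=2, so
 * the first sg_copy_buffer() moves sectors 0x3fe-0x3ff from the end of
 * the store and the second moves the remaining 2 sectors from its start.
 */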

/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
}

/*
 * If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false.
 */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
			      rest * lb_size);
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}

static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
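
/*
 * Editor's note: a minimal sketch of pairing the helper above with a
 * stored protection tuple, as dif_verify() below does. Assumptions:
 * "data" holds one logical block and "sdt" points at its t10_pi_tuple.
 */
#if 0
/* hypothetical check, mirroring the first test in dif_verify() below */
static bool guard_tag_ok(const struct t10_pi_tuple *sdt, const void *data)
{
	/* tags are compared in big-endian (wire) form */
	return sdt->guard_tag == dif_compute_csum(data, sdebug_sector_size);
}
#endif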

static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}

static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}

static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}

static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device,
				    "Unprotected RD to DIF device\n");
	}
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
3254 return illegal_condition_result;
3255 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3256 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3257 atomic_set(&sdeb_inject_pending, 0);
3258 return illegal_condition_result;
3259 }
3260 }
3261 return 0;
3262 }
3263
3264 static void dump_sector(unsigned char *buf, int len)
3265 {
3266 int i, j, n;
3267
3268 pr_err(">>> Sector Dump <<<\n");
3269 for (i = 0 ; i < len ; i += 16) {
3270 char b[128];
3271
3272 for (j = 0, n = 0; j < 16; j++) {
3273 unsigned char c = buf[i+j];
3274
3275 if (c >= 0x20 && c < 0x7e)
3276 n += scnprintf(b + n, sizeof(b) - n,
3277 " %c ", buf[i+j]);
3278 else
3279 n += scnprintf(b + n, sizeof(b) - n,
3280 "%02x ", buf[i+j]);
3281 }
3282 pr_err("%04d: %s\n", i, b);
3283 }
3284 }
3285
3286 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3287 unsigned int sectors, u32 ei_lba)
3288 {
3289 int ret;
3290 struct t10_pi_tuple *sdt;
3291 void *daddr;
3292 sector_t sector = start_sec;
3293 int ppage_offset;
3294 int dpage_offset;
3295 struct sg_mapping_iter diter;
3296 struct sg_mapping_iter piter;
3297
3298 BUG_ON(scsi_sg_count(SCpnt) == 0);
3299 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3300
3301 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3302 scsi_prot_sg_count(SCpnt),
3303 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3304 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3305 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3306
3307 /* For each protection page */
3308 while (sg_miter_next(&piter)) {
3309 dpage_offset = 0;
3310 if (WARN_ON(!sg_miter_next(&diter))) {
3311 ret = 0x01;
3312 goto out;
3313 }
3314
3315 for (ppage_offset = 0; ppage_offset < piter.length;
3316 ppage_offset += sizeof(struct t10_pi_tuple)) {
3317 /* If we're at the end of the current
3318 * data page, advance to the next one
3319 */
3320 if (dpage_offset >= diter.length) {
3321 if (WARN_ON(!sg_miter_next(&diter))) {
3322 ret = 0x01;
3323 goto out;
3324 }
3325 dpage_offset = 0;
3326 }
3327
3328 sdt = piter.addr + ppage_offset;
3329 daddr = diter.addr + dpage_offset;
3330
3331 ret = dif_verify(sdt, daddr, sector, ei_lba);
3332 if (ret) {
3333 dump_sector(daddr, sdebug_sector_size);
3334 goto out;
3335 }
3336
3337 sector++;
3338 ei_lba++;
3339 dpage_offset += sdebug_sector_size;
3340 }
3341 diter.consumed = dpage_offset;
3342 sg_miter_stop(&diter);
3343 }
3344 sg_miter_stop(&piter);
3345
3346 dif_copy_prot(SCpnt, start_sec, sectors, false);
3347 dix_writes++;
3348
3349 return 0;
3350
3351 out:
3352 dif_errors++;
3353 sg_miter_stop(&diter);
3354 sg_miter_stop(&piter);
3355 return ret;
3356 }
3357
3358 static unsigned long lba_to_map_index(sector_t lba)
3359 {
3360 if (sdebug_unmap_alignment)
3361 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3362 sector_div(lba, sdebug_unmap_granularity);
3363 return lba;
3364 }
3365
3366 static sector_t map_index_to_lba(unsigned long index)
3367 {
3368 sector_t lba = index * sdebug_unmap_granularity;
3369
3370 if (sdebug_unmap_alignment)
3371 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3372 return lba;
3373 }
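/*
 * Worked example of the two mappings above, with hypothetical module
 * parameters sdebug_unmap_granularity=8 and sdebug_unmap_alignment=2:
 * LBAs 0..1 fall in map index 0 (the short leading chunk), LBAs 2..9 in
 * index 1 and LBAs 10..17 in index 2. In the other direction,
 * map_index_to_lba(1) == 1*8 - (8 - 2) == 2 and map_index_to_lba(2) == 10,
 * i.e. each index maps back to the first LBA of its chunk.
 */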
3374
3375 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3376 unsigned int *num)
3377 {
3378 sector_t end;
3379 unsigned int mapped;
3380 unsigned long index;
3381 unsigned long next;
3382
3383 index = lba_to_map_index(lba);
3384 mapped = test_bit(index, sip->map_storep);
3385
3386 if (mapped)
3387 next = find_next_zero_bit(sip->map_storep, map_size, index);
3388 else
3389 next = find_next_bit(sip->map_storep, map_size, index);
3390
3391 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3392 *num = end - lba;
3393 return mapped;
3394 }
3395
3396 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3397 unsigned int len)
3398 {
3399 sector_t end = lba + len;
3400
3401 while (lba < end) {
3402 unsigned long index = lba_to_map_index(lba);
3403
3404 if (index < map_size)
3405 set_bit(index, sip->map_storep);
3406
3407 lba = map_index_to_lba(index + 1);
3408 }
3409 }
3410
3411 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3412 unsigned int len)
3413 {
3414 sector_t end = lba + len;
3415 u8 *fsp = sip->storep;
3416
3417 while (lba < end) {
3418 unsigned long index = lba_to_map_index(lba);
3419
3420 if (lba == map_index_to_lba(index) &&
3421 lba + sdebug_unmap_granularity <= end &&
3422 index < map_size) {
3423 clear_bit(index, sip->map_storep);
3424 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff bytes */
3425 memset(fsp + lba * sdebug_sector_size,
3426 (sdebug_lbprz & 1) ? 0 : 0xff,
3427 sdebug_sector_size *
3428 sdebug_unmap_granularity);
3429 }
3430 if (sip->dif_storep) {
3431 memset(sip->dif_storep + lba, 0xff,
3432 sizeof(*sip->dif_storep) *
3433 sdebug_unmap_granularity);
3434 }
3435 }
3436 lba = map_index_to_lba(index + 1);
3437 }
3438 }
3439
3440 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3441 {
3442 bool check_prot;
3443 u32 num;
3444 u32 ei_lba;
3445 int ret;
3446 u64 lba;
3447 struct sdeb_store_info *sip = devip2sip(devip, true);
3448 rwlock_t *macc_lckp = &sip->macc_lck;
3449 u8 *cmd = scp->cmnd;
3450
3451 switch (cmd[0]) {
3452 case WRITE_16:
3453 ei_lba = 0;
3454 lba = get_unaligned_be64(cmd + 2);
3455 num = get_unaligned_be32(cmd + 10);
3456 check_prot = true;
3457 break;
3458 case WRITE_10:
3459 ei_lba = 0;
3460 lba = get_unaligned_be32(cmd + 2);
3461 num = get_unaligned_be16(cmd + 7);
3462 check_prot = true;
3463 break;
3464 case WRITE_6:
3465 ei_lba = 0;
3466 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3467 (u32)(cmd[1] & 0x1f) << 16;
3468 num = (0 == cmd[4]) ? 256 : cmd[4];
3469 check_prot = true;
3470 break;
3471 case WRITE_12:
3472 ei_lba = 0;
3473 lba = get_unaligned_be32(cmd + 2);
3474 num = get_unaligned_be32(cmd + 6);
3475 check_prot = true;
3476 break;
3477 case 0x53: /* XDWRITEREAD(10) */
3478 ei_lba = 0;
3479 lba = get_unaligned_be32(cmd + 2);
3480 num = get_unaligned_be16(cmd + 7);
3481 check_prot = false;
3482 break;
3483 default: /* assume WRITE(32) */
3484 lba = get_unaligned_be64(cmd + 12);
3485 ei_lba = get_unaligned_be32(cmd + 20);
3486 num = get_unaligned_be32(cmd + 28);
3487 check_prot = false;
3488 break;
3489 }
3490 if (unlikely(have_dif_prot && check_prot)) {
3491 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3492 (cmd[1] & 0xe0)) {
3493 mk_sense_invalid_opcode(scp);
3494 return check_condition_result;
3495 }
3496 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3497 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3498 (cmd[1] & 0xe0) == 0)
3499 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3500 "to DIF device\n");
3501 }
3502
3503 write_lock(macc_lckp);
3504 ret = check_device_access_params(scp, lba, num, true);
3505 if (ret) {
3506 write_unlock(macc_lckp);
3507 return ret;
3508 }
3509
3510 /* DIX + T10 DIF */
3511 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3512 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3513
3514 if (prot_ret) {
3515 write_unlock(macc_lckp);
3516 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3517 return illegal_condition_result;
3518 }
3519 }
3520
3521 ret = do_device_access(sip, scp, 0, lba, num, true);
3522 if (unlikely(scsi_debug_lbp()))
3523 map_region(sip, lba, num);
3524 /* If ZBC zone then bump its write pointer */
3525 if (sdebug_dev_is_zoned(devip))
3526 zbc_inc_wp(devip, lba, num);
3527 write_unlock(macc_lckp);
3528 if (unlikely(-1 == ret))
3529 return DID_ERROR << 16;
3530 else if (unlikely(sdebug_verbose &&
3531 (ret < (num * sdebug_sector_size))))
3532 sdev_printk(KERN_INFO, scp->device,
3533 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3534 my_name, num * sdebug_sector_size, ret);
3535
3536 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3537 atomic_read(&sdeb_inject_pending))) {
3538 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3539 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3540 atomic_set(&sdeb_inject_pending, 0);
3541 return check_condition_result;
3542 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3543 /* Logical block guard check failed */
3544 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3545 atomic_set(&sdeb_inject_pending, 0);
3546 return illegal_condition_result;
3547 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3548 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3549 atomic_set(&sdeb_inject_pending, 0);
3550 return illegal_condition_result;
3551 }
3552 }
3553 return 0;
3554 }
3555
3556 /*
3557 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3558 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3559 */
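/*
 * Shape of the data-out buffer as parsed below (a sketch derived from
 * this code, not quoted from T10): a 32 byte parameter list header, then
 * num_lrd LBA range descriptors of lrd_size (32) bytes each, then the
 * write data itself starting at lbdof * block size. From each descriptor
 * the code reads a be64 LBA at offset 0, a be32 number of blocks at
 * offset 8 and, for the 32 byte cdb variant only, a be32 expected
 * initial LBA at offset 12.
 */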
3560 static int resp_write_scat(struct scsi_cmnd *scp,
3561 struct sdebug_dev_info *devip)
3562 {
3563 u8 *cmd = scp->cmnd;
3564 u8 *lrdp = NULL;
3565 u8 *up;
3566 struct sdeb_store_info *sip = devip2sip(devip, true);
3567 rwlock_t *macc_lckp = &sip->macc_lck;
3568 u8 wrprotect;
3569 u16 lbdof, num_lrd, k;
3570 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3571 u32 lb_size = sdebug_sector_size;
3572 u32 ei_lba;
3573 u64 lba;
3574 int ret, res;
3575 bool is_16;
3576 static const u32 lrd_size = 32; /* + parameter list header size */
3577
3578 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3579 is_16 = false;
3580 wrprotect = (cmd[10] >> 5) & 0x7;
3581 lbdof = get_unaligned_be16(cmd + 12);
3582 num_lrd = get_unaligned_be16(cmd + 16);
3583 bt_len = get_unaligned_be32(cmd + 28);
3584 } else { /* that leaves WRITE SCATTERED(16) */
3585 is_16 = true;
3586 wrprotect = (cmd[2] >> 5) & 0x7;
3587 lbdof = get_unaligned_be16(cmd + 4);
3588 num_lrd = get_unaligned_be16(cmd + 8);
3589 bt_len = get_unaligned_be32(cmd + 10);
3590 if (unlikely(have_dif_prot)) {
3591 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3592 wrprotect) {
3593 mk_sense_invalid_opcode(scp);
3594 return illegal_condition_result;
3595 }
3596 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3597 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3598 wrprotect == 0)
3599 sdev_printk(KERN_ERR, scp->device,
3600 "Unprotected WR to DIF device\n");
3601 }
3602 }
3603 if ((num_lrd == 0) || (bt_len == 0))
3604 return 0; /* T10 says these do-nothings are not errors */
3605 if (lbdof == 0) {
3606 if (sdebug_verbose)
3607 sdev_printk(KERN_INFO, scp->device,
3608 "%s: %s: LB Data Offset field bad\n",
3609 my_name, __func__);
3610 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3611 return illegal_condition_result;
3612 }
3613 lbdof_blen = lbdof * lb_size;
3614 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3615 if (sdebug_verbose)
3616 sdev_printk(KERN_INFO, scp->device,
3617 "%s: %s: LBA range descriptors don't fit\n",
3618 my_name, __func__);
3619 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3620 return illegal_condition_result;
3621 }
3622 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3623 if (lrdp == NULL)
3624 return SCSI_MLQUEUE_HOST_BUSY;
3625 if (sdebug_verbose)
3626 sdev_printk(KERN_INFO, scp->device,
3627 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3628 my_name, __func__, lbdof_blen);
3629 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3630 if (res == -1) {
3631 ret = DID_ERROR << 16;
3632 goto err_out;
3633 }
3634
3635 write_lock(macc_lckp);
3636 sg_off = lbdof_blen;
3637 /* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3638 cum_lb = 0;
3639 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3640 lba = get_unaligned_be64(up + 0);
3641 num = get_unaligned_be32(up + 8);
3642 if (sdebug_verbose)
3643 sdev_printk(KERN_INFO, scp->device,
3644 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3645 my_name, __func__, k, lba, num, sg_off);
3646 if (num == 0)
3647 continue;
3648 ret = check_device_access_params(scp, lba, num, true);
3649 if (ret)
3650 goto err_out_unlock;
3651 num_by = num * lb_size;
3652 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3653
3654 if ((cum_lb + num) > bt_len) {
3655 if (sdebug_verbose)
3656 sdev_printk(KERN_INFO, scp->device,
3657 "%s: %s: sum of blocks > data provided\n",
3658 my_name, __func__);
3659 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3660 0);
3661 ret = illegal_condition_result;
3662 goto err_out_unlock;
3663 }
3664
3665 /* DIX + T10 DIF */
3666 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3667 int prot_ret = prot_verify_write(scp, lba, num,
3668 ei_lba);
3669
3670 if (prot_ret) {
3671 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3672 prot_ret);
3673 ret = illegal_condition_result;
3674 goto err_out_unlock;
3675 }
3676 }
3677
3678 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3679 /* If ZBC zone then bump its write pointer */
3680 if (sdebug_dev_is_zoned(devip))
3681 zbc_inc_wp(devip, lba, num);
3682 if (unlikely(scsi_debug_lbp()))
3683 map_region(sip, lba, num);
3684 if (unlikely(-1 == ret)) {
3685 ret = DID_ERROR << 16;
3686 goto err_out_unlock;
3687 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3688 sdev_printk(KERN_INFO, scp->device,
3689 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3690 my_name, num_by, ret);
3691
3692 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3693 atomic_read(&sdeb_inject_pending))) {
3694 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3695 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3696 atomic_set(&sdeb_inject_pending, 0);
3697 ret = check_condition_result;
3698 goto err_out_unlock;
3699 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3700 /* Logical block guard check failed */
3701 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3702 atomic_set(&sdeb_inject_pending, 0);
3703 ret = illegal_condition_result;
3704 goto err_out_unlock;
3705 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3706 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3707 atomic_set(&sdeb_inject_pending, 0);
3708 ret = illegal_condition_result;
3709 goto err_out_unlock;
3710 }
3711 }
3712 sg_off += num_by;
3713 cum_lb += num;
3714 }
3715 ret = 0;
3716 err_out_unlock:
3717 write_unlock(macc_lckp);
3718 err_out:
3719 kfree(lrdp);
3720 return ret;
3721 }
3722
3723 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3724 u32 ei_lba, bool unmap, bool ndob)
3725 {
3726 struct scsi_device *sdp = scp->device;
3727 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3728 unsigned long long i;
3729 u64 block, lbaa;
3730 u32 lb_size = sdebug_sector_size;
3731 int ret;
3732 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3733 scp->device->hostdata, true);
3734 rwlock_t *macc_lckp = &sip->macc_lck;
3735 u8 *fs1p;
3736 u8 *fsp;
3737
3738 write_lock(macc_lckp);
3739
3740 ret = check_device_access_params(scp, lba, num, true);
3741 if (ret) {
3742 write_unlock(macc_lckp);
3743 return ret;
3744 }
3745
3746 if (unmap && scsi_debug_lbp()) {
3747 unmap_region(sip, lba, num);
3748 goto out;
3749 }
3750 lbaa = lba;
3751 block = do_div(lbaa, sdebug_store_sectors);
3752 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3753 fsp = sip->storep;
3754 fs1p = fsp + (block * lb_size);
3755 if (ndob) {
3756 memset(fs1p, 0, lb_size);
3757 ret = 0;
3758 } else
3759 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3760
3761 if (-1 == ret) {
3762 write_unlock(macc_lckp);
3763 return DID_ERROR << 16;
3764 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3765 sdev_printk(KERN_INFO, scp->device,
3766 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3767 my_name, "write same", lb_size, ret);
3768
3769 /* Copy first sector to remaining blocks */
3770 for (i = 1 ; i < num ; i++) {
3771 lbaa = lba + i;
3772 block = do_div(lbaa, sdebug_store_sectors);
3773 memmove(fsp + (block * lb_size), fs1p, lb_size);
3774 }
3775 if (scsi_debug_lbp())
3776 map_region(sip, lba, num);
3777 /* If ZBC zone then bump its write pointer */
3778 if (sdebug_dev_is_zoned(devip))
3779 zbc_inc_wp(devip, lba, num);
3780 out:
3781 write_unlock(macc_lckp);
3782
3783 return 0;
3784 }
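/*
 * Usage note for resp_write_same(): with the UNMAP bit set and
 * scsi_debug_lbp() true the unmap_region() path above is taken; with
 * NDOB set the single source block is zeroed instead of fetched from
 * the data-out buffer; either way that one block is then replicated
 * across the remaining num - 1 blocks of the range.
 */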
3785
3786 static int resp_write_same_10(struct scsi_cmnd *scp,
3787 struct sdebug_dev_info *devip)
3788 {
3789 u8 *cmd = scp->cmnd;
3790 u32 lba;
3791 u16 num;
3792 u32 ei_lba = 0;
3793 bool unmap = false;
3794
3795 if (cmd[1] & 0x8) {
3796 if (sdebug_lbpws10 == 0) {
3797 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3798 return check_condition_result;
3799 } else
3800 unmap = true;
3801 }
3802 lba = get_unaligned_be32(cmd + 2);
3803 num = get_unaligned_be16(cmd + 7);
3804 if (num > sdebug_write_same_length) {
3805 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3806 return check_condition_result;
3807 }
3808 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3809 }
3810
3811 static int resp_write_same_16(struct scsi_cmnd *scp,
3812 struct sdebug_dev_info *devip)
3813 {
3814 u8 *cmd = scp->cmnd;
3815 u64 lba;
3816 u32 num;
3817 u32 ei_lba = 0;
3818 bool unmap = false;
3819 bool ndob = false;
3820
3821 if (cmd[1] & 0x8) { /* UNMAP */
3822 if (sdebug_lbpws == 0) {
3823 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3824 return check_condition_result;
3825 } else
3826 unmap = true;
3827 }
3828 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3829 ndob = true;
3830 lba = get_unaligned_be64(cmd + 2);
3831 num = get_unaligned_be32(cmd + 10);
3832 if (num > sdebug_write_same_length) {
3833 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3834 return check_condition_result;
3835 }
3836 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3837 }
3838
3839 /* Note the mode field is in the same position as the (lower) service action
3840 * field. For the Report supported operation codes command, SPC-4 suggests
3841 * each mode of this command should be reported separately; that is left for the future. */
3842 static int resp_write_buffer(struct scsi_cmnd *scp,
3843 struct sdebug_dev_info *devip)
3844 {
3845 u8 *cmd = scp->cmnd;
3846 struct scsi_device *sdp = scp->device;
3847 struct sdebug_dev_info *dp;
3848 u8 mode;
3849
3850 mode = cmd[1] & 0x1f;
3851 switch (mode) {
3852 case 0x4: /* download microcode (MC) and activate (ACT) */
3853 /* set UAs on this device only */
3854 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3855 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3856 break;
3857 case 0x5: /* download MC, save and ACT */
3858 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3859 break;
3860 case 0x6: /* download MC with offsets and ACT */
3861 /* set UAs on most devices (LUs) in this target */
3862 list_for_each_entry(dp,
3863 &devip->sdbg_host->dev_info_list,
3864 dev_list)
3865 if (dp->target == sdp->id) {
3866 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3867 if (devip != dp)
3868 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3869 dp->uas_bm);
3870 }
3871 break;
3872 case 0x7: /* download MC with offsets, save, and ACT */
3873 /* set UA on all devices (LUs) in this target */
3874 list_for_each_entry(dp,
3875 &devip->sdbg_host->dev_info_list,
3876 dev_list)
3877 if (dp->target == sdp->id)
3878 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3879 dp->uas_bm);
3880 break;
3881 default:
3882 /* do nothing for this command for other mode values */
3883 break;
3884 }
3885 return 0;
3886 }
3887
3888 static int resp_comp_write(struct scsi_cmnd *scp,
3889 struct sdebug_dev_info *devip)
3890 {
3891 u8 *cmd = scp->cmnd;
3892 u8 *arr;
3893 struct sdeb_store_info *sip = devip2sip(devip, true);
3894 rwlock_t *macc_lckp = &sip->macc_lck;
3895 u64 lba;
3896 u32 dnum;
3897 u32 lb_size = sdebug_sector_size;
3898 u8 num;
3899 int ret;
3900 int retval = 0;
3901
3902 lba = get_unaligned_be64(cmd + 2);
3903 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3904 if (0 == num)
3905 return 0; /* degenerate case, not an error */
3906 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3907 (cmd[1] & 0xe0)) {
3908 mk_sense_invalid_opcode(scp);
3909 return check_condition_result;
3910 }
3911 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3912 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3913 (cmd[1] & 0xe0) == 0)
3914 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3915 "to DIF device\n");
3916 ret = check_device_access_params(scp, lba, num, false);
3917 if (ret)
3918 return ret;
3919 dnum = 2 * num;
3920 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3921 if (NULL == arr) {
3922 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3923 INSUFF_RES_ASCQ);
3924 return check_condition_result;
3925 }
3926
3927 write_lock(macc_lckp);
3928
3929 ret = do_dout_fetch(scp, dnum, arr);
3930 if (ret == -1) {
3931 retval = DID_ERROR << 16;
3932 goto cleanup;
3933 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3934 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3935 "indicated=%u, IO sent=%d bytes\n", my_name,
3936 dnum * lb_size, ret);
3937 if (!comp_write_worker(sip, lba, num, arr, false)) {
3938 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3939 retval = check_condition_result;
3940 goto cleanup;
3941 }
3942 if (scsi_debug_lbp())
3943 map_region(sip, lba, num);
3944 cleanup:
3945 write_unlock(macc_lckp);
3946 kfree(arr);
3947 return retval;
3948 }
3949
3950 struct unmap_block_desc {
3951 __be64 lba;
3952 __be32 blocks;
3953 __be32 __reserved;
3954 };
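/*
 * The UNMAP parameter list that resp_unmap() below expects: an 8 byte
 * header (be16 data length at offset 0, be16 block descriptor data
 * length at offset 2, 4 reserved bytes) followed at offset 8 by the
 * 16 byte descriptors defined above. The BUG_ON()s below cross-check
 * those two header lengths against the cdb's PARAMETER LIST LENGTH field.
 */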
3955
3956 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3957 {
3958 unsigned char *buf;
3959 struct unmap_block_desc *desc;
3960 struct sdeb_store_info *sip = devip2sip(devip, true);
3961 rwlock_t *macc_lckp = &sip->macc_lck;
3962 unsigned int i, payload_len, descriptors;
3963 int ret;
3964
3965 if (!scsi_debug_lbp())
3966 return 0; /* fib and say it's done */
3967 payload_len = get_unaligned_be16(scp->cmnd + 7);
3968 BUG_ON(scsi_bufflen(scp) != payload_len);
3969
3970 descriptors = (payload_len - 8) / 16;
3971 if (descriptors > sdebug_unmap_max_desc) {
3972 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3973 return check_condition_result;
3974 }
3975
3976 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3977 if (!buf) {
3978 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3979 INSUFF_RES_ASCQ);
3980 return check_condition_result;
3981 }
3982
3983 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3984
3985 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3986 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3987
3988 desc = (void *)&buf[8];
3989
3990 write_lock(macc_lckp);
3991
3992 for (i = 0 ; i < descriptors ; i++) {
3993 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3994 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3995
3996 ret = check_device_access_params(scp, lba, num, true);
3997 if (ret)
3998 goto out;
3999
4000 unmap_region(sip, lba, num);
4001 }
4002
4003 ret = 0;
4004
4005 out:
4006 write_unlock(macc_lckp);
4007 kfree(buf);
4008
4009 return ret;
4010 }
4011
4012 #define SDEBUG_GET_LBA_STATUS_LEN 32
4013
4014 static int resp_get_lba_status(struct scsi_cmnd *scp,
4015 struct sdebug_dev_info *devip)
4016 {
4017 u8 *cmd = scp->cmnd;
4018 u64 lba;
4019 u32 alloc_len, mapped, num;
4020 int ret;
4021 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4022
4023 lba = get_unaligned_be64(cmd + 2);
4024 alloc_len = get_unaligned_be32(cmd + 10);
4025
4026 if (alloc_len < 24)
4027 return 0;
4028
4029 ret = check_device_access_params(scp, lba, 1, false);
4030 if (ret)
4031 return ret;
4032
4033 if (scsi_debug_lbp()) {
4034 struct sdeb_store_info *sip = devip2sip(devip, true);
4035
4036 mapped = map_state(sip, lba, &num);
4037 } else {
4038 mapped = 1;
4039 /* following just in case virtual_gb changed */
4040 sdebug_capacity = get_sdebug_capacity();
4041 if (sdebug_capacity - lba <= 0xffffffff)
4042 num = sdebug_capacity - lba;
4043 else
4044 num = 0xffffffff;
4045 }
4046
4047 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4048 put_unaligned_be32(20, arr); /* Parameter Data Length */
4049 put_unaligned_be64(lba, arr + 8); /* LBA */
4050 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4051 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4052
4053 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4054 }
4055
4056 static int resp_sync_cache(struct scsi_cmnd *scp,
4057 struct sdebug_dev_info *devip)
4058 {
4059 int res = 0;
4060 u64 lba;
4061 u32 num_blocks;
4062 u8 *cmd = scp->cmnd;
4063
4064 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4065 lba = get_unaligned_be32(cmd + 2);
4066 num_blocks = get_unaligned_be16(cmd + 7);
4067 } else { /* SYNCHRONIZE_CACHE(16) */
4068 lba = get_unaligned_be64(cmd + 2);
4069 num_blocks = get_unaligned_be32(cmd + 10);
4070 }
4071 if (lba + num_blocks > sdebug_capacity) {
4072 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4073 return check_condition_result;
4074 }
4075 if (!write_since_sync || (cmd[1] & 0x2))
4076 res = SDEG_RES_IMMED_MASK;
4077 else /* delay if write_since_sync and IMMED clear */
4078 write_since_sync = false;
4079 return res;
4080 }
4081
4082 /*
4083 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4084 * CONDITION MET if the specified blocks will fit in (or are already in) the
4085 * cache, and a GOOD status otherwise. Model a disk with a big cache and yield
4086 * CONDITION MET. Actually tries to bring the range in main memory into the
4087 * cache associated with the CPU(s).
4088 */
4089 static int resp_pre_fetch(struct scsi_cmnd *scp,
4090 struct sdebug_dev_info *devip)
4091 {
4092 int res = 0;
4093 u64 lba;
4094 u64 block, rest = 0;
4095 u32 nblks;
4096 u8 *cmd = scp->cmnd;
4097 struct sdeb_store_info *sip = devip2sip(devip, true);
4098 rwlock_t *macc_lckp = &sip->macc_lck;
4099 u8 *fsp = sip->storep;
4100
4101 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4102 lba = get_unaligned_be32(cmd + 2);
4103 nblks = get_unaligned_be16(cmd + 7);
4104 } else { /* PRE-FETCH(16) */
4105 lba = get_unaligned_be64(cmd + 2);
4106 nblks = get_unaligned_be32(cmd + 10);
4107 }
4108 if (lba + nblks > sdebug_capacity) {
4109 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4110 return check_condition_result;
4111 }
4112 if (!fsp)
4113 goto fini;
4114 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4115 block = do_div(lba, sdebug_store_sectors);
4116 if (block + nblks > sdebug_store_sectors)
4117 rest = block + nblks - sdebug_store_sectors;
4118
4119 /* Try to bring the PRE-FETCH range into CPU's cache */
4120 read_lock(macc_lckp);
4121 prefetch_range(fsp + (sdebug_sector_size * block),
4122 (nblks - rest) * sdebug_sector_size);
4123 if (rest)
4124 prefetch_range(fsp, rest * sdebug_sector_size);
4125 read_unlock(macc_lckp);
4126 fini:
4127 if (cmd[1] & 0x2)
4128 res = SDEG_RES_IMMED_MASK;
4129 return res | condition_met_result;
4130 }
4131
4132 #define RL_BUCKET_ELEMS 8
4133
4134 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4135 * (W-LUN), the normal Linux scanning logic does not associate it with a
4136 * device (e.g. /dev/sg7). The following magic will make that association:
4137 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4138 * where <n> is a host number. If there are multiple targets in a host then
4139 * the above will associate a W-LUN with each target. To get a W-LUN for
4140 * target 2 only, use "echo '- 2 49409' > scan".
4141 */
4142 static int resp_report_luns(struct scsi_cmnd *scp,
4143 struct sdebug_dev_info *devip)
4144 {
4145 unsigned char *cmd = scp->cmnd;
4146 unsigned int alloc_len;
4147 unsigned char select_report;
4148 u64 lun;
4149 struct scsi_lun *lun_p;
4150 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4151 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4152 unsigned int wlun_cnt; /* report luns W-LUN count */
4153 unsigned int tlun_cnt; /* total LUN count */
4154 unsigned int rlen; /* response length (in bytes) */
4155 int k, j, n, res;
4156 unsigned int off_rsp = 0;
4157 const int sz_lun = sizeof(struct scsi_lun);
4158
4159 clear_luns_changed_on_target(devip);
4160
4161 select_report = cmd[2];
4162 alloc_len = get_unaligned_be32(cmd + 6);
4163
4164 if (alloc_len < 4) {
4165 pr_err("alloc len too small %d\n", alloc_len);
4166 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4167 return check_condition_result;
4168 }
4169
4170 switch (select_report) {
4171 case 0: /* all LUNs apart from W-LUNs */
4172 lun_cnt = sdebug_max_luns;
4173 wlun_cnt = 0;
4174 break;
4175 case 1: /* only W-LUNs */
4176 lun_cnt = 0;
4177 wlun_cnt = 1;
4178 break;
4179 case 2: /* all LUNs */
4180 lun_cnt = sdebug_max_luns;
4181 wlun_cnt = 1;
4182 break;
4183 case 0x10: /* only administrative LUs */
4184 case 0x11: /* see SPC-5 */
4185 case 0x12: /* only subsidiary LUs owned by referenced LU */
4186 default:
4187 pr_debug("select report invalid %d\n", select_report);
4188 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4189 return check_condition_result;
4190 }
4191
4192 if (sdebug_no_lun_0 && (lun_cnt > 0))
4193 --lun_cnt;
4194
4195 tlun_cnt = lun_cnt + wlun_cnt;
4196 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4197 scsi_set_resid(scp, scsi_bufflen(scp));
4198 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4199 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4200
4201 /* loops rely on the response header being the same size as a LUN (both 8 bytes) */
4202 lun = sdebug_no_lun_0 ? 1 : 0;
4203 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4204 memset(arr, 0, sizeof(arr));
4205 lun_p = (struct scsi_lun *)&arr[0];
4206 if (k == 0) {
4207 put_unaligned_be32(rlen, &arr[0]);
4208 ++lun_p;
4209 j = 1;
4210 }
4211 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4212 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4213 break;
4214 int_to_scsilun(lun++, lun_p);
4215 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4216 lun_p->scsi_lun[0] |= 0x40;
4217 }
4218 if (j < RL_BUCKET_ELEMS)
4219 break;
4220 n = j * sz_lun;
4221 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4222 if (res)
4223 return res;
4224 off_rsp += n;
4225 }
4226 if (wlun_cnt) {
4227 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4228 ++j;
4229 }
4230 if (j > 0)
4231 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4232 return res;
4233 }
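/*
 * Worked example of the bucket loop above, assuming sdebug_max_luns=10,
 * no_lun_0 off and no W-LUN requested: pass k=0 emits the 8 byte header
 * plus LUNs 0..6 (64 bytes), pass k=1 fills LUNs 7..9 and breaks with
 * j=3, so the final p_fill_from_dev_buffer() sends the remaining 24
 * bytes. rlen in the header is 80 (10 LUNs * 8 bytes), which excludes
 * the header itself.
 */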
4234
4235 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4236 {
4237 bool is_bytchk3 = false;
4238 u8 bytchk;
4239 int ret, j;
4240 u32 vnum, a_num, off;
4241 const u32 lb_size = sdebug_sector_size;
4242 u64 lba;
4243 u8 *arr;
4244 u8 *cmd = scp->cmnd;
4245 struct sdeb_store_info *sip = devip2sip(devip, true);
4246 rwlock_t *macc_lckp = &sip->macc_lck;
4247
4248 bytchk = (cmd[1] >> 1) & 0x3;
4249 if (bytchk == 0) {
4250 return 0; /* always claim internal verify okay */
4251 } else if (bytchk == 2) {
4252 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4253 return check_condition_result;
4254 } else if (bytchk == 3) {
4255 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4256 }
4257 switch (cmd[0]) {
4258 case VERIFY_16:
4259 lba = get_unaligned_be64(cmd + 2);
4260 vnum = get_unaligned_be32(cmd + 10);
4261 break;
4262 case VERIFY: /* is VERIFY(10) */
4263 lba = get_unaligned_be32(cmd + 2);
4264 vnum = get_unaligned_be16(cmd + 7);
4265 break;
4266 default:
4267 mk_sense_invalid_opcode(scp);
4268 return check_condition_result;
4269 }
4270 if (vnum == 0)
4271 return 0; /* not an error */
4272 a_num = is_bytchk3 ? 1 : vnum;
4273 /* Treat following check like one for read (i.e. no write) access */
4274 ret = check_device_access_params(scp, lba, a_num, false);
4275 if (ret)
4276 return ret;
4277
4278 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4279 if (!arr) {
4280 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4281 INSUFF_RES_ASCQ);
4282 return check_condition_result;
4283 }
4284 /* Not changing store, so only need read access */
4285 read_lock(macc_lckp);
4286
4287 ret = do_dout_fetch(scp, a_num, arr);
4288 if (ret == -1) {
4289 ret = DID_ERROR << 16;
4290 goto cleanup;
4291 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4292 sdev_printk(KERN_INFO, scp->device,
4293 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4294 my_name, __func__, a_num * lb_size, ret);
4295 }
4296 if (is_bytchk3) {
4297 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4298 memcpy(arr + off, arr, lb_size);
4299 }
4300 ret = 0;
4301 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4302 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4303 ret = check_condition_result;
4304 goto cleanup;
4305 }
4306 cleanup:
4307 read_unlock(macc_lckp);
4308 kfree(arr);
4309 return ret;
4310 }
4311
4312 #define RZONES_DESC_HD 64
4313
4314 /* Report zones depending on start LBA and reporting options */
4315 static int resp_report_zones(struct scsi_cmnd *scp,
4316 struct sdebug_dev_info *devip)
4317 {
4318 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4319 int ret = 0;
4320 u32 alloc_len, rep_opts, rep_len;
4321 bool partial;
4322 u64 lba, zs_lba;
4323 u8 *arr = NULL, *desc;
4324 u8 *cmd = scp->cmnd;
4325 struct sdeb_zone_state *zsp;
4326 struct sdeb_store_info *sip = devip2sip(devip, false);
4327 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4328
4329 if (!sdebug_dev_is_zoned(devip)) {
4330 mk_sense_invalid_opcode(scp);
4331 return check_condition_result;
4332 }
4333 zs_lba = get_unaligned_be64(cmd + 2);
4334 alloc_len = get_unaligned_be32(cmd + 10);
4335 if (alloc_len == 0)
4336 return 0; /* not an error */
4337 rep_opts = cmd[14] & 0x3f;
4338 partial = cmd[14] & 0x80;
4339
4340 if (zs_lba >= sdebug_capacity) {
4341 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4342 return check_condition_result;
4343 }
4344
4345 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4346 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4347 max_zones);
4348
4349 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4350 if (!arr) {
4351 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4352 INSUFF_RES_ASCQ);
4353 return check_condition_result;
4354 }
4355
4356 read_lock(macc_lckp);
4357
4358 desc = arr + 64;
4359 for (i = 0; i < max_zones; i++) {
4360 lba = zs_lba + devip->zsize * i;
4361 if (lba > sdebug_capacity)
4362 break;
4363 zsp = zbc_zone(devip, lba);
4364 switch (rep_opts) {
4365 case 0x00:
4366 /* All zones */
4367 break;
4368 case 0x01:
4369 /* Empty zones */
4370 if (zsp->z_cond != ZC1_EMPTY)
4371 continue;
4372 break;
4373 case 0x02:
4374 /* Implicit open zones */
4375 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4376 continue;
4377 break;
4378 case 0x03:
4379 /* Explicit open zones */
4380 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4381 continue;
4382 break;
4383 case 0x04:
4384 /* Closed zones */
4385 if (zsp->z_cond != ZC4_CLOSED)
4386 continue;
4387 break;
4388 case 0x05:
4389 /* Full zones */
4390 if (zsp->z_cond != ZC5_FULL)
4391 continue;
4392 break;
4393 case 0x06:
4394 case 0x07:
4395 case 0x10:
4396 /*
4397 * Read-only, offline, reset WP recommended are
4398 * not emulated: no zones to report.
4399 */
4400 continue;
4401 case 0x11:
4402 /* non-seq-resource set */
4403 if (!zsp->z_non_seq_resource)
4404 continue;
4405 break;
4406 case 0x3f:
4407 /* Not write pointer (conventional) zones */
4408 if (!zbc_zone_is_conv(zsp))
4409 continue;
4410 break;
4411 default:
4412 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4413 INVALID_FIELD_IN_CDB, 0);
4414 ret = check_condition_result;
4415 goto fini;
4416 }
4417
4418 if (nrz < rep_max_zones) {
4419 /* Fill zone descriptor */
4420 desc[0] = zsp->z_type;
4421 desc[1] = zsp->z_cond << 4;
4422 if (zsp->z_non_seq_resource)
4423 desc[1] |= 1 << 1;
4424 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4425 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4426 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4427 desc += 64;
4428 }
4429
4430 if (partial && nrz >= rep_max_zones)
4431 break;
4432
4433 nrz++;
4434 }
4435
4436 /* Report header */
4437 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4438 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4439
4440 rep_len = (unsigned long)desc - (unsigned long)arr;
4441 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4442
4443 fini:
4444 read_unlock(macc_lckp);
4445 kfree(arr);
4446 return ret;
4447 }
4448
4449 /* Logic transplanted from tcmu-runner, file_zbc.c */
4450 static void zbc_open_all(struct sdebug_dev_info *devip)
4451 {
4452 struct sdeb_zone_state *zsp = &devip->zstate[0];
4453 unsigned int i;
4454
4455 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4456 if (zsp->z_cond == ZC4_CLOSED)
4457 zbc_open_zone(devip, &devip->zstate[i], true);
4458 }
4459 }
4460
4461 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4462 {
4463 int res = 0;
4464 u64 z_id;
4465 enum sdebug_z_cond zc;
4466 u8 *cmd = scp->cmnd;
4467 struct sdeb_zone_state *zsp;
4468 bool all = cmd[14] & 0x01;
4469 struct sdeb_store_info *sip = devip2sip(devip, false);
4470 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4471
4472 if (!sdebug_dev_is_zoned(devip)) {
4473 mk_sense_invalid_opcode(scp);
4474 return check_condition_result;
4475 }
4476
4477 write_lock(macc_lckp);
4478
4479 if (all) {
4480 /* Check if all closed zones can be open */
4481 if (devip->max_open &&
4482 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4483 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4484 INSUFF_ZONE_ASCQ);
4485 res = check_condition_result;
4486 goto fini;
4487 }
4488 /* Open all closed zones */
4489 zbc_open_all(devip);
4490 goto fini;
4491 }
4492
4493 /* Open the specified zone */
4494 z_id = get_unaligned_be64(cmd + 2);
4495 if (z_id >= sdebug_capacity) {
4496 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4497 res = check_condition_result;
4498 goto fini;
4499 }
4500
4501 zsp = zbc_zone(devip, z_id);
4502 if (z_id != zsp->z_start) {
4503 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4504 res = check_condition_result;
4505 goto fini;
4506 }
4507 if (zbc_zone_is_conv(zsp)) {
4508 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4509 res = check_condition_result;
4510 goto fini;
4511 }
4512
4513 zc = zsp->z_cond;
4514 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4515 goto fini;
4516
4517 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4518 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4519 INSUFF_ZONE_ASCQ);
4520 res = check_condition_result;
4521 goto fini;
4522 }
4523
4524 zbc_open_zone(devip, zsp, true);
4525 fini:
4526 write_unlock(macc_lckp);
4527 return res;
4528 }
4529
4530 static void zbc_close_all(struct sdebug_dev_info *devip)
4531 {
4532 unsigned int i;
4533
4534 for (i = 0; i < devip->nr_zones; i++)
4535 zbc_close_zone(devip, &devip->zstate[i]);
4536 }
4537
4538 static int resp_close_zone(struct scsi_cmnd *scp,
4539 struct sdebug_dev_info *devip)
4540 {
4541 int res = 0;
4542 u64 z_id;
4543 u8 *cmd = scp->cmnd;
4544 struct sdeb_zone_state *zsp;
4545 bool all = cmd[14] & 0x01;
4546 struct sdeb_store_info *sip = devip2sip(devip, false);
4547 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4548
4549 if (!sdebug_dev_is_zoned(devip)) {
4550 mk_sense_invalid_opcode(scp);
4551 return check_condition_result;
4552 }
4553
4554 write_lock(macc_lckp);
4555
4556 if (all) {
4557 zbc_close_all(devip);
4558 goto fini;
4559 }
4560
4561 /* Close specified zone */
4562 z_id = get_unaligned_be64(cmd + 2);
4563 if (z_id >= sdebug_capacity) {
4564 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4565 res = check_condition_result;
4566 goto fini;
4567 }
4568
4569 zsp = zbc_zone(devip, z_id);
4570 if (z_id != zsp->z_start) {
4571 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4572 res = check_condition_result;
4573 goto fini;
4574 }
4575 if (zbc_zone_is_conv(zsp)) {
4576 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4577 res = check_condition_result;
4578 goto fini;
4579 }
4580
4581 zbc_close_zone(devip, zsp);
4582 fini:
4583 write_unlock(macc_lckp);
4584 return res;
4585 }
4586
4587 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4588 struct sdeb_zone_state *zsp, bool empty)
4589 {
4590 enum sdebug_z_cond zc = zsp->z_cond;
4591
4592 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4593 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4594 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4595 zbc_close_zone(devip, zsp);
4596 if (zsp->z_cond == ZC4_CLOSED)
4597 devip->nr_closed--;
4598 zsp->z_wp = zsp->z_start + zsp->z_size;
4599 zsp->z_cond = ZC5_FULL;
4600 }
4601 }
4602
4603 static void zbc_finish_all(struct sdebug_dev_info *devip)
4604 {
4605 unsigned int i;
4606
4607 for (i = 0; i < devip->nr_zones; i++)
4608 zbc_finish_zone(devip, &devip->zstate[i], false);
4609 }
4610
4611 static int resp_finish_zone(struct scsi_cmnd *scp,
4612 struct sdebug_dev_info *devip)
4613 {
4614 struct sdeb_zone_state *zsp;
4615 int res = 0;
4616 u64 z_id;
4617 u8 *cmd = scp->cmnd;
4618 bool all = cmd[14] & 0x01;
4619 struct sdeb_store_info *sip = devip2sip(devip, false);
4620 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4621
4622 if (!sdebug_dev_is_zoned(devip)) {
4623 mk_sense_invalid_opcode(scp);
4624 return check_condition_result;
4625 }
4626
4627 write_lock(macc_lckp);
4628
4629 if (all) {
4630 zbc_finish_all(devip);
4631 goto fini;
4632 }
4633
4634 /* Finish the specified zone */
4635 z_id = get_unaligned_be64(cmd + 2);
4636 if (z_id >= sdebug_capacity) {
4637 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4638 res = check_condition_result;
4639 goto fini;
4640 }
4641
4642 zsp = zbc_zone(devip, z_id);
4643 if (z_id != zsp->z_start) {
4644 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4645 res = check_condition_result;
4646 goto fini;
4647 }
4648 if (zbc_zone_is_conv(zsp)) {
4649 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4650 res = check_condition_result;
4651 goto fini;
4652 }
4653
4654 zbc_finish_zone(devip, zsp, true);
4655 fini:
4656 write_unlock(macc_lckp);
4657 return res;
4658 }
4659
4660 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4661 struct sdeb_zone_state *zsp)
4662 {
4663 enum sdebug_z_cond zc;
4664 struct sdeb_store_info *sip = devip2sip(devip, false);
4665
4666 if (zbc_zone_is_conv(zsp))
4667 return;
4668
4669 zc = zsp->z_cond;
4670 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4671 zbc_close_zone(devip, zsp);
4672
4673 if (zsp->z_cond == ZC4_CLOSED)
4674 devip->nr_closed--;
4675
4676 if (zsp->z_wp > zsp->z_start)
4677 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4678 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4679
4680 zsp->z_non_seq_resource = false;
4681 zsp->z_wp = zsp->z_start;
4682 zsp->z_cond = ZC1_EMPTY;
4683 }
4684
4685 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4686 {
4687 unsigned int i;
4688
4689 for (i = 0; i < devip->nr_zones; i++)
4690 zbc_rwp_zone(devip, &devip->zstate[i]);
4691 }
4692
4693 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4694 {
4695 struct sdeb_zone_state *zsp;
4696 int res = 0;
4697 u64 z_id;
4698 u8 *cmd = scp->cmnd;
4699 bool all = cmd[14] & 0x01;
4700 struct sdeb_store_info *sip = devip2sip(devip, false);
4701 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4702
4703 if (!sdebug_dev_is_zoned(devip)) {
4704 mk_sense_invalid_opcode(scp);
4705 return check_condition_result;
4706 }
4707
4708 write_lock(macc_lckp);
4709
4710 if (all) {
4711 zbc_rwp_all(devip);
4712 goto fini;
4713 }
4714
4715 z_id = get_unaligned_be64(cmd + 2);
4716 if (z_id >= sdebug_capacity) {
4717 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4718 res = check_condition_result;
4719 goto fini;
4720 }
4721
4722 zsp = zbc_zone(devip, z_id);
4723 if (z_id != zsp->z_start) {
4724 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4725 res = check_condition_result;
4726 goto fini;
4727 }
4728 if (zbc_zone_is_conv(zsp)) {
4729 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4730 res = check_condition_result;
4731 goto fini;
4732 }
4733
4734 zbc_rwp_zone(devip, zsp);
4735 fini:
4736 write_unlock(macc_lckp);
4737 return res;
4738 }
4739
4740 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4741 {
4742 u16 hwq;
4743 u32 tag = blk_mq_unique_tag(cmnd->request);
4744
4745 hwq = blk_mq_unique_tag_to_hwq(tag);
4746
4747 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4748 if (WARN_ON_ONCE(hwq >= submit_queues))
4749 hwq = 0;
4750
4751 return sdebug_q_arr + hwq;
4752 }
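/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper 16
 * bits of the returned tag (blk_mq_unique_tag_to_hwq() is simply
 * tag >> 16) while the low 16 bits carry the per-queue tag, so for
 * example tag 0x00030007 maps to sdebug_q_arr[3].
 */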
4753
4754 static u32 get_tag(struct scsi_cmnd *cmnd)
4755 {
4756 return blk_mq_unique_tag(cmnd->request);
4757 }
4758
4759 /* Queued (deferred) command completions converge here. */
4760 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4761 {
4762 bool aborted = sd_dp->aborted;
4763 int qc_idx;
4764 int retiring = 0;
4765 unsigned long iflags;
4766 struct sdebug_queue *sqp;
4767 struct sdebug_queued_cmd *sqcp;
4768 struct scsi_cmnd *scp;
4769 struct sdebug_dev_info *devip;
4770
4771 sd_dp->defer_t = SDEB_DEFER_NONE;
4772 if (unlikely(aborted))
4773 sd_dp->aborted = false;
4774 qc_idx = sd_dp->qc_idx;
4775 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4776 if (sdebug_statistics) {
4777 atomic_inc(&sdebug_completions);
4778 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4779 atomic_inc(&sdebug_miss_cpus);
4780 }
4781 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4782 pr_err("wild qc_idx=%d\n", qc_idx);
4783 return;
4784 }
4785 spin_lock_irqsave(&sqp->qc_lock, iflags);
4786 sqcp = &sqp->qc_arr[qc_idx];
4787 scp = sqcp->a_cmnd;
4788 if (unlikely(scp == NULL)) {
4789 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4790 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4791 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4792 return;
4793 }
4794 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4795 if (likely(devip))
4796 atomic_dec(&devip->num_in_q);
4797 else
4798 pr_err("devip=NULL\n");
4799 if (unlikely(atomic_read(&retired_max_queue) > 0))
4800 retiring = 1;
4801
4802 sqcp->a_cmnd = NULL;
4803 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4804 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4805 pr_err("Unexpected completion\n");
4806 return;
4807 }
4808
4809 if (unlikely(retiring)) { /* user has reduced max_queue */
4810 int k, retval;
4811
4812 retval = atomic_read(&retired_max_queue);
4813 if (qc_idx >= retval) {
4814 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4815 pr_err("index %d too large\n", retval);
4816 return;
4817 }
4818 k = find_last_bit(sqp->in_use_bm, retval);
4819 if ((k < sdebug_max_queue) || (k == retval))
4820 atomic_set(&retired_max_queue, 0);
4821 else
4822 atomic_set(&retired_max_queue, k + 1);
4823 }
4824 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4825 if (unlikely(aborted)) {
4826 if (sdebug_verbose)
4827 pr_info("bypassing scsi_done() due to aborted cmd\n");
4828 return;
4829 }
4830 scp->scsi_done(scp); /* callback to mid level */
4831 }
4832
4833 /* When high resolution timer goes off this function is called. */
4834 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4835 {
4836 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4837 hrt);
4838 sdebug_q_cmd_complete(sd_dp);
4839 return HRTIMER_NORESTART;
4840 }
4841
4842 /* When work queue schedules work, it calls this function. */
4843 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4844 {
4845 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4846 ew.work);
4847 sdebug_q_cmd_complete(sd_dp);
4848 }
4849
4850 static bool got_shared_uuid;
4851 static uuid_t shared_uuid;
4852
4853 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4854 {
4855 struct sdeb_zone_state *zsp;
4856 sector_t capacity = get_sdebug_capacity();
4857 sector_t zstart = 0;
4858 unsigned int i;
4859
4860 /*
4861 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4862 * a zone size allowing for at least 4 zones on the device. Otherwise,
4863 * use the specified zone size, checking that at least 2 zones can be
4864 * created for the device.
4865 */
4866 if (!sdeb_zbc_zone_size_mb) {
4867 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4868 >> ilog2(sdebug_sector_size);
4869 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4870 devip->zsize >>= 1;
4871 if (devip->zsize < 2) {
4872 pr_err("Device capacity too small\n");
4873 return -EINVAL;
4874 }
4875 } else {
4876 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4877 pr_err("Zone size is not a power of 2\n");
4878 return -EINVAL;
4879 }
4880 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4881 >> ilog2(sdebug_sector_size);
4882 if (devip->zsize >= capacity) {
4883 pr_err("Zone size too large for device capacity\n");
4884 return -EINVAL;
4885 }
4886 }
4887
4888 devip->zsize_shift = ilog2(devip->zsize);
4889 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4890
4891 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4892 pr_err("Number of conventional zones too large\n");
4893 return -EINVAL;
4894 }
4895 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4896
4897 if (devip->zmodel == BLK_ZONED_HM) {
4898 /* zbc_max_open_zones can be 0, meaning "not reported" */
4899 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4900 devip->max_open = (devip->nr_zones - 1) / 2;
4901 else
4902 devip->max_open = sdeb_zbc_max_open;
4903 }
4904
4905 devip->zstate = kcalloc(devip->nr_zones,
4906 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4907 if (!devip->zstate)
4908 return -ENOMEM;
4909
4910 for (i = 0; i < devip->nr_zones; i++) {
4911 zsp = &devip->zstate[i];
4912
4913 zsp->z_start = zstart;
4914
4915 if (i < devip->nr_conv_zones) {
4916 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4917 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4918 zsp->z_wp = (sector_t)-1;
4919 } else {
4920 if (devip->zmodel == BLK_ZONED_HM)
4921 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4922 else
4923 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4924 zsp->z_cond = ZC1_EMPTY;
4925 zsp->z_wp = zsp->z_start;
4926 }
4927
4928 if (zsp->z_start + devip->zsize < capacity)
4929 zsp->z_size = devip->zsize;
4930 else
4931 zsp->z_size = capacity - zsp->z_start;
4932
4933 zstart += zsp->z_size;
4934 }
4935
4936 return 0;
4937 }
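/*
 * Worked example of the sizing above, under assumed defaults of a 512
 * byte sector size and a 128 MiB DEF_ZBC_ZONE_SIZE_MB: an unset
 * sdeb_zbc_zone_size_mb starts zsize at 262144 sectors; a 256 MiB
 * (524288 sector) device cannot hold four such zones, so zsize is
 * halved once to 131072 sectors, giving zsize_shift == 17 and
 * nr_zones == 4, with the last zone shortened if the capacity is not a
 * multiple of the zone size.
 */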
4938
4939 static struct sdebug_dev_info *sdebug_device_create(
4940 struct sdebug_host_info *sdbg_host, gfp_t flags)
4941 {
4942 struct sdebug_dev_info *devip;
4943
4944 devip = kzalloc(sizeof(*devip), flags);
4945 if (devip) {
4946 if (sdebug_uuid_ctl == 1)
4947 uuid_gen(&devip->lu_name);
4948 else if (sdebug_uuid_ctl == 2) {
4949 if (got_shared_uuid)
4950 devip->lu_name = shared_uuid;
4951 else {
4952 uuid_gen(&shared_uuid);
4953 got_shared_uuid = true;
4954 devip->lu_name = shared_uuid;
4955 }
4956 }
4957 devip->sdbg_host = sdbg_host;
4958 if (sdeb_zbc_in_use) {
4959 devip->zmodel = sdeb_zbc_model;
4960 if (sdebug_device_create_zones(devip)) {
4961 kfree(devip);
4962 return NULL;
4963 }
4964 } else {
4965 devip->zmodel = BLK_ZONED_NONE;
4966 }
4968 devip->create_ts = ktime_get_boottime();
4969 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4970 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4971 }
4972 return devip;
4973 }
4974
4975 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4976 {
4977 struct sdebug_host_info *sdbg_host;
4978 struct sdebug_dev_info *open_devip = NULL;
4979 struct sdebug_dev_info *devip;
4980
4981 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4982 if (!sdbg_host) {
4983 pr_err("Host info NULL\n");
4984 return NULL;
4985 }
4986
4987 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4988 if ((devip->used) && (devip->channel == sdev->channel) &&
4989 (devip->target == sdev->id) &&
4990 (devip->lun == sdev->lun))
4991 return devip;
4992 else {
4993 if ((!devip->used) && (!open_devip))
4994 open_devip = devip;
4995 }
4996 }
4997 if (!open_devip) { /* try and make a new one */
4998 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4999 if (!open_devip) {
5000 pr_err("out of memory at line %d\n", __LINE__);
5001 return NULL;
5002 }
5003 }
5004
5005 open_devip->channel = sdev->channel;
5006 open_devip->target = sdev->id;
5007 open_devip->lun = sdev->lun;
5008 open_devip->sdbg_host = sdbg_host;
5009 atomic_set(&open_devip->num_in_q, 0);
5010 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5011 open_devip->used = true;
5012 return open_devip;
5013 }
5014
5015 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5016 {
5017 if (sdebug_verbose)
5018 pr_info("slave_alloc <%u %u %u %llu>\n",
5019 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5020 return 0;
5021 }
5022
5023 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5024 {
5025 struct sdebug_dev_info *devip =
5026 (struct sdebug_dev_info *)sdp->hostdata;
5027
5028 if (sdebug_verbose)
5029 pr_info("slave_configure <%u %u %u %llu>\n",
5030 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5031 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5032 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5033 if (devip == NULL) {
5034 devip = find_build_dev_info(sdp);
5035 if (devip == NULL)
5036 return 1; /* no resources, will be marked offline */
5037 }
5038 sdp->hostdata = devip;
5039 if (sdebug_no_uld)
5040 sdp->no_uld_attach = 1;
5041 config_cdb_len(sdp);
5042 return 0;
5043 }
5044
5045 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5046 {
5047 struct sdebug_dev_info *devip =
5048 (struct sdebug_dev_info *)sdp->hostdata;
5049
5050 if (sdebug_verbose)
5051 pr_info("slave_destroy <%u %u %u %llu>\n",
5052 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5053 if (devip) {
5054 /* make this slot available for re-use */
5055 devip->used = false;
5056 sdp->hostdata = NULL;
5057 }
5058 }
5059
5060 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5061 enum sdeb_defer_type defer_t)
5062 {
5063 if (!sd_dp)
5064 return;
5065 if (defer_t == SDEB_DEFER_HRT)
5066 hrtimer_cancel(&sd_dp->hrt);
5067 else if (defer_t == SDEB_DEFER_WQ)
5068 cancel_work_sync(&sd_dp->ew.work);
5069 }
5070
5071 /* If @cmnd is found, delete its timer or work queue and return true; else
5072    return false. */
5073 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5074 {
5075 unsigned long iflags;
5076 int j, k, qmax, r_qmax;
5077 enum sdeb_defer_type l_defer_t;
5078 struct sdebug_queue *sqp;
5079 struct sdebug_queued_cmd *sqcp;
5080 struct sdebug_dev_info *devip;
5081 struct sdebug_defer *sd_dp;
5082
5083 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5084 spin_lock_irqsave(&sqp->qc_lock, iflags);
5085 qmax = sdebug_max_queue;
5086 r_qmax = atomic_read(&retired_max_queue);
5087 if (r_qmax > qmax)
5088 qmax = r_qmax;
5089 for (k = 0; k < qmax; ++k) {
5090 if (test_bit(k, sqp->in_use_bm)) {
5091 sqcp = &sqp->qc_arr[k];
5092 if (cmnd != sqcp->a_cmnd)
5093 continue;
5094 /* found */
5095 devip = (struct sdebug_dev_info *)
5096 cmnd->device->hostdata;
5097 if (devip)
5098 atomic_dec(&devip->num_in_q);
5099 sqcp->a_cmnd = NULL;
5100 sd_dp = sqcp->sd_dp;
5101 if (sd_dp) {
5102 l_defer_t = sd_dp->defer_t;
5103 sd_dp->defer_t = SDEB_DEFER_NONE;
5104 } else
5105 l_defer_t = SDEB_DEFER_NONE;
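				/*
				 * Drop qc_lock before cancelling: the deferred
				 * completion path takes the same lock, and
				 * cancel_work_sync() can sleep, so calling
				 * stop_qc_helper() while holding it risks a
				 * deadlock.
				 */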
5106 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5107 stop_qc_helper(sd_dp, l_defer_t);
5108 clear_bit(k, sqp->in_use_bm);
5109 return true;
5110 }
5111 }
5112 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5113 }
5114 return false;
5115 }
5116
5117 /* Deletes (stops) timers or work queues of all queued commands */
5118 static void stop_all_queued(void)
5119 {
5120 unsigned long iflags;
5121 int j, k;
5122 enum sdeb_defer_type l_defer_t;
5123 struct sdebug_queue *sqp;
5124 struct sdebug_queued_cmd *sqcp;
5125 struct sdebug_dev_info *devip;
5126 struct sdebug_defer *sd_dp;
5127
5128 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5129 spin_lock_irqsave(&sqp->qc_lock, iflags);
5130 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5131 if (test_bit(k, sqp->in_use_bm)) {
5132 sqcp = &sqp->qc_arr[k];
5133 if (sqcp->a_cmnd == NULL)
5134 continue;
5135 devip = (struct sdebug_dev_info *)
5136 sqcp->a_cmnd->device->hostdata;
5137 if (devip)
5138 atomic_dec(&devip->num_in_q);
5139 sqcp->a_cmnd = NULL;
5140 sd_dp = sqcp->sd_dp;
5141 if (sd_dp) {
5142 l_defer_t = sd_dp->defer_t;
5143 sd_dp->defer_t = SDEB_DEFER_NONE;
5144 } else
5145 l_defer_t = SDEB_DEFER_NONE;
5146 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5147 stop_qc_helper(sd_dp, l_defer_t);
5148 clear_bit(k, sqp->in_use_bm);
5149 spin_lock_irqsave(&sqp->qc_lock, iflags);
5150 }
5151 }
5152 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5153 }
5154 }
5155
5156 /* Free queued command memory on heap */
5157 static void free_all_queued(void)
5158 {
5159 int j, k;
5160 struct sdebug_queue *sqp;
5161 struct sdebug_queued_cmd *sqcp;
5162
5163 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5164 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5165 sqcp = &sqp->qc_arr[k];
5166 kfree(sqcp->sd_dp);
5167 sqcp->sd_dp = NULL;
5168 }
5169 }
5170 }
5171
5172 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5173 {
5174 bool ok;
5175
5176 ++num_aborts;
5177 if (SCpnt) {
5178 ok = stop_queued_cmnd(SCpnt);
5179 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5180 sdev_printk(KERN_INFO, SCpnt->device,
5181 "%s: command%s found\n", __func__,
5182 ok ? "" : " not");
5183 }
5184 return SUCCESS;
5185 }
5186
5187 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5188 {
5189 ++num_dev_resets;
5190 if (SCpnt && SCpnt->device) {
5191 struct scsi_device *sdp = SCpnt->device;
5192 struct sdebug_dev_info *devip =
5193 (struct sdebug_dev_info *)sdp->hostdata;
5194
5195 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5196 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5197 if (devip)
5198 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5199 }
5200 return SUCCESS;
5201 }
5202
5203 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5204 {
5205 struct sdebug_host_info *sdbg_host;
5206 struct sdebug_dev_info *devip;
5207 struct scsi_device *sdp;
5208 struct Scsi_Host *hp;
5209 int k = 0;
5210
5211 ++num_target_resets;
5212 if (!SCpnt)
5213 goto lie;
5214 sdp = SCpnt->device;
5215 if (!sdp)
5216 goto lie;
5217 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5218 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5219 hp = sdp->host;
5220 if (!hp)
5221 goto lie;
5222 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5223 if (sdbg_host) {
5224 list_for_each_entry(devip,
5225 &sdbg_host->dev_info_list,
5226 dev_list)
5227 if (devip->target == sdp->id) {
5228 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5229 ++k;
5230 }
5231 }
5232 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5233 sdev_printk(KERN_INFO, sdp,
5234 "%s: %d device(s) found in target\n", __func__, k);
5235 lie:
5236 return SUCCESS;
5237 }
5238
5239 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5240 {
5241 struct sdebug_host_info *sdbg_host;
5242 struct sdebug_dev_info *devip;
5243 struct scsi_device *sdp;
5244 struct Scsi_Host *hp;
5245 int k = 0;
5246
5247 ++num_bus_resets;
5248 if (!(SCpnt && SCpnt->device))
5249 goto lie;
5250 sdp = SCpnt->device;
5251 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5252 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5253 hp = sdp->host;
5254 if (hp) {
5255 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5256 if (sdbg_host) {
5257 list_for_each_entry(devip,
5258 &sdbg_host->dev_info_list,
5259 dev_list) {
5260 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5261 ++k;
5262 }
5263 }
5264 }
5265 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5266 sdev_printk(KERN_INFO, sdp,
5267 "%s: %d device(s) found in host\n", __func__, k);
5268 lie:
5269 return SUCCESS;
5270 }
5271
5272 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5273 {
5274 struct sdebug_host_info *sdbg_host;
5275 struct sdebug_dev_info *devip;
5276 int k = 0;
5277
5278 ++num_host_resets;
5279 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5280 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5281 spin_lock(&sdebug_host_list_lock);
5282 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5283 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5284 dev_list) {
5285 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5286 ++k;
5287 }
5288 }
5289 spin_unlock(&sdebug_host_list_lock);
5290 stop_all_queued();
5291 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5292 sdev_printk(KERN_INFO, SCpnt->device,
5293 "%s: %d device(s) found\n", __func__, k);
5294 return SUCCESS;
5295 }
5296
5297 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5298 {
5299 struct msdos_partition *pp;
5300 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5301 int sectors_per_part, num_sectors, k;
5302 int heads_by_sects, start_sec, end_sec;
5303
5304 /* assume partition table already zeroed */
5305 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5306 return;
5307 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5308 sdebug_num_parts = SDEBUG_MAX_PARTS;
5309 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5310 }
5311 num_sectors = (int)get_sdebug_capacity();
5312 sectors_per_part = (num_sectors - sdebug_sectors_per)
5313 / sdebug_num_parts;
5314 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5315 starts[0] = sdebug_sectors_per;
5316 max_part_secs = sectors_per_part;
5317 for (k = 1; k < sdebug_num_parts; ++k) {
5318 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5319 * heads_by_sects;
5320 if (starts[k] - starts[k - 1] < max_part_secs)
5321 max_part_secs = starts[k] - starts[k - 1];
5322 }
5323 starts[sdebug_num_parts] = num_sectors;
5324 starts[sdebug_num_parts + 1] = 0;
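	/*
	 * starts[] now holds the cylinder-aligned first sector of each
	 * partition; the trailing 0 entry is the sentinel that ends the
	 * build loop below.
	 */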
5325
5326 ramp[510] = 0x55; /* magic partition markings */
5327 ramp[511] = 0xAA;
5328 pp = (struct msdos_partition *)(ramp + 0x1be);
5329 for (k = 0; starts[k + 1]; ++k, ++pp) {
5330 start_sec = starts[k];
5331 end_sec = starts[k] + max_part_secs - 1;
5332 pp->boot_ind = 0;
5333
5334 pp->cyl = start_sec / heads_by_sects;
5335 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5336 / sdebug_sectors_per;
5337 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5338
5339 pp->end_cyl = end_sec / heads_by_sects;
5340 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5341 / sdebug_sectors_per;
5342 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
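		/*
		 * CHS example (illustrative values): with sdebug_heads = 16
		 * and sdebug_sectors_per = 32, heads_by_sects = 512. A start
		 * sector of 32 maps to cyl 0, head 1, sector 1; an end
		 * sector of 511 maps to cyl 0, head 15, sector 32.
		 */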
5343
5344 pp->start_sect = cpu_to_le32(start_sec);
5345 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5346 pp->sys_ind = 0x83; /* plain Linux partition */
5347 }
5348 }
5349
5350 static void block_unblock_all_queues(bool block)
5351 {
5352 int j;
5353 struct sdebug_queue *sqp;
5354
5355 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5356 atomic_set(&sqp->blocked, (int)block);
5357 }
5358
5359 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5360 * commands will be processed normally before triggers occur.
5361 */
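/*
 * Example (illustrative): with every_nth = 100 and sdebug_cmnd_count
 * at 250, the count is rounded down to 200, so 99 more commands are
 * processed normally before the next trigger fires.
 */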
5362 static void tweak_cmnd_count(void)
5363 {
5364 int count, modulo;
5365
5366 modulo = abs(sdebug_every_nth);
5367 if (modulo < 2)
5368 return;
5369 block_unblock_all_queues(true);
5370 count = atomic_read(&sdebug_cmnd_count);
5371 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5372 block_unblock_all_queues(false);
5373 }
5374
5375 static void clear_queue_stats(void)
5376 {
5377 atomic_set(&sdebug_cmnd_count, 0);
5378 atomic_set(&sdebug_completions, 0);
5379 atomic_set(&sdebug_miss_cpus, 0);
5380 atomic_set(&sdebug_a_tsf, 0);
5381 }
5382
5383 static bool inject_on_this_cmd(void)
5384 {
5385 if (sdebug_every_nth == 0)
5386 return false;
5387 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5388 }
5389
5390 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5391
5392 /* Complete the processing of the thread that queued a SCSI command to this
5393 * driver. It either completes the command by calling scsi_done() or
5394 * schedules an hrtimer or work queue item and then returns 0. Returns
5395 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5396 */
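/*
 * Summary of the delay plumbing below: delta_jiff == 0 responds in the
 * invocation thread; delta_jiff > 0 or ndelay > 0 defers completion to
 * an hrtimer (very small ndelay values may still complete inline); a
 * negative delta_jiff defers completion to a work queue instead.
 */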
5397 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5398 int scsi_result,
5399 int (*pfp)(struct scsi_cmnd *,
5400 struct sdebug_dev_info *),
5401 int delta_jiff, int ndelay)
5402 {
5403 bool new_sd_dp;
5404 bool inject = false;
5405 int k, num_in_q, qdepth;
5406 unsigned long iflags;
5407 u64 ns_from_boot = 0;
5408 struct sdebug_queue *sqp;
5409 struct sdebug_queued_cmd *sqcp;
5410 struct scsi_device *sdp;
5411 struct sdebug_defer *sd_dp;
5412
5413 if (unlikely(devip == NULL)) {
5414 if (scsi_result == 0)
5415 scsi_result = DID_NO_CONNECT << 16;
5416 goto respond_in_thread;
5417 }
5418 sdp = cmnd->device;
5419
5420 if (delta_jiff == 0)
5421 goto respond_in_thread;
5422
5423 sqp = get_queue(cmnd);
5424 spin_lock_irqsave(&sqp->qc_lock, iflags);
5425 if (unlikely(atomic_read(&sqp->blocked))) {
5426 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5427 return SCSI_MLQUEUE_HOST_BUSY;
5428 }
5429 num_in_q = atomic_read(&devip->num_in_q);
5430 qdepth = cmnd->device->queue_depth;
5431 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5432 if (scsi_result) {
5433 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5434 goto respond_in_thread;
5435 } else
5436 scsi_result = device_qfull_result;
5437 } else if (unlikely(sdebug_every_nth &&
5438 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5439 (scsi_result == 0))) {
5440 if ((num_in_q == (qdepth - 1)) &&
5441 (atomic_inc_return(&sdebug_a_tsf) >=
5442 abs(sdebug_every_nth))) {
5443 atomic_set(&sdebug_a_tsf, 0);
5444 inject = true;
5445 scsi_result = device_qfull_result;
5446 }
5447 }
5448
5449 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5450 if (unlikely(k >= sdebug_max_queue)) {
5451 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5452 if (scsi_result)
5453 goto respond_in_thread;
5454 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5455 scsi_result = device_qfull_result;
5456 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5457 sdev_printk(KERN_INFO, sdp,
5458 "%s: max_queue=%d exceeded, %s\n",
5459 __func__, sdebug_max_queue,
5460 (scsi_result ? "status: TASK SET FULL" :
5461 "report: host busy"));
5462 if (scsi_result)
5463 goto respond_in_thread;
5464 else
5465 return SCSI_MLQUEUE_HOST_BUSY;
5466 }
5467 set_bit(k, sqp->in_use_bm);
5468 atomic_inc(&devip->num_in_q);
5469 sqcp = &sqp->qc_arr[k];
5470 sqcp->a_cmnd = cmnd;
5471 cmnd->host_scribble = (unsigned char *)sqcp;
5472 sd_dp = sqcp->sd_dp;
5473 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5474 if (!sd_dp) {
5475 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5476 if (!sd_dp) {
5477 atomic_dec(&devip->num_in_q);
5478 clear_bit(k, sqp->in_use_bm);
5479 return SCSI_MLQUEUE_HOST_BUSY;
5480 }
5481 new_sd_dp = true;
5482 } else {
5483 new_sd_dp = false;
5484 }
5485
5486 /* Set the hostwide tag */
5487 if (sdebug_host_max_queue)
5488 sd_dp->hc_idx = get_tag(cmnd);
5489
5490 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5491 ns_from_boot = ktime_get_boottime_ns();
5492
5493 /* one of the resp_*() response functions is called here */
5494 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5495 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5496 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5497 delta_jiff = ndelay = 0;
5498 }
5499 if (cmnd->result == 0 && scsi_result != 0)
5500 cmnd->result = scsi_result;
5501 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5502 if (atomic_read(&sdeb_inject_pending)) {
5503 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5504 atomic_set(&sdeb_inject_pending, 0);
5505 cmnd->result = check_condition_result;
5506 }
5507 }
5508
5509 if (unlikely(sdebug_verbose && cmnd->result))
5510 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5511 __func__, cmnd->result);
5512
5513 if (delta_jiff > 0 || ndelay > 0) {
5514 ktime_t kt;
5515
5516 if (delta_jiff > 0) {
5517 u64 ns = jiffies_to_nsecs(delta_jiff);
5518
5519 if (sdebug_random && ns < U32_MAX) {
5520 ns = prandom_u32_max((u32)ns);
5521 } else if (sdebug_random) {
5522 ns >>= 12; /* scale to 4 usec precision */
5523 if (ns < U32_MAX) /* over 4 hours max */
5524 ns = prandom_u32_max((u32)ns);
5525 ns <<= 12;
5526 }
5527 kt = ns_to_ktime(ns);
5528 } else { /* ndelay has a 4.2 second max */
5529 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5530 (u32)ndelay;
5531 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5532 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5533
5534 if (kt <= d) { /* elapsed duration >= kt */
5535 spin_lock_irqsave(&sqp->qc_lock, iflags);
5536 sqcp->a_cmnd = NULL;
5537 atomic_dec(&devip->num_in_q);
5538 clear_bit(k, sqp->in_use_bm);
5539 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5540 if (new_sd_dp)
5541 kfree(sd_dp);
5542 /* call scsi_done() from this thread */
5543 cmnd->scsi_done(cmnd);
5544 return 0;
5545 }
5546 /* otherwise reduce kt by elapsed time */
5547 kt -= d;
5548 }
5549 }
5550 if (!sd_dp->init_hrt) {
5551 sd_dp->init_hrt = true;
5552 sqcp->sd_dp = sd_dp;
5553 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5554 HRTIMER_MODE_REL_PINNED);
5555 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5556 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5557 sd_dp->qc_idx = k;
5558 }
5559 if (sdebug_statistics)
5560 sd_dp->issuing_cpu = raw_smp_processor_id();
5561 sd_dp->defer_t = SDEB_DEFER_HRT;
5562 /* schedule the invocation of scsi_done() for a later time */
5563 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5564 } else { /* jdelay < 0, use work queue */
5565 if (!sd_dp->init_wq) {
5566 sd_dp->init_wq = true;
5567 sqcp->sd_dp = sd_dp;
5568 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5569 sd_dp->qc_idx = k;
5570 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5571 }
5572 if (sdebug_statistics)
5573 sd_dp->issuing_cpu = raw_smp_processor_id();
5574 sd_dp->defer_t = SDEB_DEFER_WQ;
5575 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5576 atomic_read(&sdeb_inject_pending)))
5577 sd_dp->aborted = true;
5578 schedule_work(&sd_dp->ew.work);
5579 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5580 atomic_read(&sdeb_inject_pending))) {
5581 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5582 blk_abort_request(cmnd->request);
5583 atomic_set(&sdeb_inject_pending, 0);
5584 }
5585 }
5586 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5587 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5588 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5589 return 0;
5590
5591 respond_in_thread: /* call back to mid-layer using invocation thread */
5592 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5593 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5594 if (cmnd->result == 0 && scsi_result != 0)
5595 cmnd->result = scsi_result;
5596 cmnd->scsi_done(cmnd);
5597 return 0;
5598 }
5599
5600 /* Note: The following macros create attribute files in the
5601 /sys/module/scsi_debug/parameters directory. Unfortunately this
5602    driver is not notified of a change and so cannot trigger auxiliary
5603    actions, as it can when the corresponding attribute in the
5604 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5605 */
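/*
 * Example usage (illustrative): these parameters can be supplied at
 * load time, e.g.:
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * and the writable ones changed later via files under
 * /sys/module/scsi_debug/parameters/ .
 */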
5606 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5607 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5608 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5609 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5610 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5611 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5612 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5613 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5614 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5615 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5616 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5617 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5618 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5619 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5620 module_param_string(inq_product, sdebug_inq_product_id,
5621 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5622 module_param_string(inq_rev, sdebug_inq_product_rev,
5623 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5624 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5625 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5626 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5627 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5628 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5629 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5630 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5631 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5632 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5633 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5634 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5635 S_IRUGO | S_IWUSR);
5636 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5637 S_IRUGO | S_IWUSR);
5638 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5639 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5640 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5641 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5642 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5643 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5644 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5645 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5646 module_param_named(per_host_store, sdebug_per_host_store, bool,
5647 S_IRUGO | S_IWUSR);
5648 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5649 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5650 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5651 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5652 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5653 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5654 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5655 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5656 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5657 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5658 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5659 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5660 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5661 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5662 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5663 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5664 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5665 S_IRUGO | S_IWUSR);
5666 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5667 module_param_named(write_same_length, sdebug_write_same_length, int,
5668 S_IRUGO | S_IWUSR);
5669 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5670 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5671 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5672 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5673
5674 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5675 MODULE_DESCRIPTION("SCSI debug adapter driver");
5676 MODULE_LICENSE("GPL");
5677 MODULE_VERSION(SDEBUG_VERSION);
5678
5679 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5680 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5681 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5682 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5683 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5684 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5685 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5686 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5687 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5688 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5689 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5690 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5691 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5692 MODULE_PARM_DESC(host_max_queue,
5693 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5694 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5695 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5696 SDEBUG_VERSION "\")");
5697 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5698 MODULE_PARM_DESC(lbprz,
5699 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5700 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5701 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5702 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5703 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5704 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5705 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5706 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5707 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5708 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5709 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5710 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5711 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5712 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5713 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5714 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5715 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5716 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5717 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5718 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5719 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5720 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5721 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5722 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5723 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5724 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5725 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5726 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5727 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5728 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5729 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5730 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5731 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5732 MODULE_PARM_DESC(uuid_ctl,
5733 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5734 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5735 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5736 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5737 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5738 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5739 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5740 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5741 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5742
5743 #define SDEBUG_INFO_LEN 256
5744 static char sdebug_info[SDEBUG_INFO_LEN];
5745
5746 static const char *scsi_debug_info(struct Scsi_Host *shp)
5747 {
5748 int k;
5749
5750 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5751 my_name, SDEBUG_VERSION, sdebug_version_date);
5752 if (k >= (SDEBUG_INFO_LEN - 1))
5753 return sdebug_info;
5754 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5755 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5756 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5757 "statistics", (int)sdebug_statistics);
5758 return sdebug_info;
5759 }
5760
5761 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5762 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5763 int length)
5764 {
5765 char arr[16];
5766 int opts;
5767 int minLen = length > 15 ? 15 : length;
5768
5769 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5770 return -EACCES;
5771 memcpy(arr, buffer, minLen);
5772 arr[minLen] = '\0';
5773 if (1 != sscanf(arr, "%d", &opts))
5774 return -EINVAL;
5775 sdebug_opts = opts;
5776 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5777 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5778 if (sdebug_every_nth != 0)
5779 tweak_cmnd_count();
5780 return length;
5781 }
5782
5783 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5784 * same for each scsi_debug host (if more than one). Some of the counters
5785  * output are not atomic so may be inaccurate on a busy system. */
5786 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5787 {
5788 int f, j, l;
5789 struct sdebug_queue *sqp;
5790 struct sdebug_host_info *sdhp;
5791
5792 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5793 SDEBUG_VERSION, sdebug_version_date);
5794 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5795 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5796 sdebug_opts, sdebug_every_nth);
5797 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5798 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5799 sdebug_sector_size, "bytes");
5800 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5801 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5802 num_aborts);
5803 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5804 num_dev_resets, num_target_resets, num_bus_resets,
5805 num_host_resets);
5806 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5807 dix_reads, dix_writes, dif_errors);
5808 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5809 sdebug_statistics);
5810 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5811 atomic_read(&sdebug_cmnd_count),
5812 atomic_read(&sdebug_completions),
5813 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5814 atomic_read(&sdebug_a_tsf));
5815
5816 seq_printf(m, "submit_queues=%d\n", submit_queues);
5817 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5818 seq_printf(m, " queue %d:\n", j);
5819 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5820 if (f != sdebug_max_queue) {
5821 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5822 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5823 "first,last bits", f, l);
5824 }
5825 }
5826
5827 seq_printf(m, "this host_no=%d\n", host->host_no);
5828 if (!xa_empty(per_store_ap)) {
5829 bool niu;
5830 int idx;
5831 unsigned long l_idx;
5832 struct sdeb_store_info *sip;
5833
5834 seq_puts(m, "\nhost list:\n");
5835 j = 0;
5836 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5837 idx = sdhp->si_idx;
5838 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5839 sdhp->shost->host_no, idx);
5840 ++j;
5841 }
5842 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5843 sdeb_most_recent_idx);
5844 j = 0;
5845 xa_for_each(per_store_ap, l_idx, sip) {
5846 niu = xa_get_mark(per_store_ap, l_idx,
5847 SDEB_XA_NOT_IN_USE);
5848 idx = (int)l_idx;
5849 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5850 (niu ? " not_in_use" : ""));
5851 ++j;
5852 }
5853 }
5854 return 0;
5855 }
5856
5857 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5858 {
5859 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5860 }
5861 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5862 * of delay is jiffies.
5863 */
5864 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5865 size_t count)
5866 {
5867 int jdelay, res;
5868
5869 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5870 res = count;
5871 if (sdebug_jdelay != jdelay) {
5872 int j, k;
5873 struct sdebug_queue *sqp;
5874
5875 block_unblock_all_queues(true);
5876 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5877 ++j, ++sqp) {
5878 k = find_first_bit(sqp->in_use_bm,
5879 sdebug_max_queue);
5880 if (k != sdebug_max_queue) {
5881 res = -EBUSY; /* queued commands */
5882 break;
5883 }
5884 }
5885 if (res > 0) {
5886 sdebug_jdelay = jdelay;
5887 sdebug_ndelay = 0;
5888 }
5889 block_unblock_all_queues(false);
5890 }
5891 return res;
5892 }
5893 return -EINVAL;
5894 }
5895 static DRIVER_ATTR_RW(delay);
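/*
 * Example (illustrative): a write such as
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * takes effect only while no commands are queued; otherwise the store
 * above returns -EBUSY.
 */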
5896
5897 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5898 {
5899 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5900 }
5901 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5902 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5903 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5904 size_t count)
5905 {
5906 int ndelay, res;
5907
5908 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5909 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5910 res = count;
5911 if (sdebug_ndelay != ndelay) {
5912 int j, k;
5913 struct sdebug_queue *sqp;
5914
5915 block_unblock_all_queues(true);
5916 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5917 ++j, ++sqp) {
5918 k = find_first_bit(sqp->in_use_bm,
5919 sdebug_max_queue);
5920 if (k != sdebug_max_queue) {
5921 res = -EBUSY; /* queued commands */
5922 break;
5923 }
5924 }
5925 if (res > 0) {
5926 sdebug_ndelay = ndelay;
5927 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5928 : DEF_JDELAY;
5929 }
5930 block_unblock_all_queues(false);
5931 }
5932 return res;
5933 }
5934 return -EINVAL;
5935 }
5936 static DRIVER_ATTR_RW(ndelay);
5937
5938 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5939 {
5940 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5941 }
5942
5943 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5944 size_t count)
5945 {
5946 int opts;
5947 char work[20];
5948
5949 if (sscanf(buf, "%10s", work) == 1) {
5950 if (strncasecmp(work, "0x", 2) == 0) {
5951 if (kstrtoint(work + 2, 16, &opts) == 0)
5952 goto opts_done;
5953 } else {
5954 if (kstrtoint(work, 10, &opts) == 0)
5955 goto opts_done;
5956 }
5957 }
5958 return -EINVAL;
5959 opts_done:
5960 sdebug_opts = opts;
5961 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5962 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5963 tweak_cmnd_count();
5964 return count;
5965 }
5966 static DRIVER_ATTR_RW(opts);
5967
5968 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5969 {
5970 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5971 }
5972 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5973 size_t count)
5974 {
5975 int n;
5976
5977 /* Cannot change from or to TYPE_ZBC with sysfs */
5978 if (sdebug_ptype == TYPE_ZBC)
5979 return -EINVAL;
5980
5981 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5982 if (n == TYPE_ZBC)
5983 return -EINVAL;
5984 sdebug_ptype = n;
5985 return count;
5986 }
5987 return -EINVAL;
5988 }
5989 static DRIVER_ATTR_RW(ptype);
5990
5991 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5992 {
5993 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5994 }
5995 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5996 size_t count)
5997 {
5998 int n;
5999
6000 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6001 sdebug_dsense = n;
6002 return count;
6003 }
6004 return -EINVAL;
6005 }
6006 static DRIVER_ATTR_RW(dsense);
6007
6008 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6009 {
6010 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6011 }
6012 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6013 size_t count)
6014 {
6015 int n, idx;
6016
6017 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6018 bool want_store = (n == 0);
6019 struct sdebug_host_info *sdhp;
6020
6021 n = (n > 0);
6022 sdebug_fake_rw = (sdebug_fake_rw > 0);
6023 if (sdebug_fake_rw == n)
6024 return count; /* not transitioning so do nothing */
6025
6026 if (want_store) { /* 1 --> 0 transition, set up store */
6027 if (sdeb_first_idx < 0) {
6028 idx = sdebug_add_store();
6029 if (idx < 0)
6030 return idx;
6031 } else {
6032 idx = sdeb_first_idx;
6033 xa_clear_mark(per_store_ap, idx,
6034 SDEB_XA_NOT_IN_USE);
6035 }
6036 /* make all hosts use same store */
6037 list_for_each_entry(sdhp, &sdebug_host_list,
6038 host_list) {
6039 if (sdhp->si_idx != idx) {
6040 xa_set_mark(per_store_ap, sdhp->si_idx,
6041 SDEB_XA_NOT_IN_USE);
6042 sdhp->si_idx = idx;
6043 }
6044 }
6045 sdeb_most_recent_idx = idx;
6046 } else { /* 0 --> 1 transition is trigger for shrink */
6047 sdebug_erase_all_stores(true /* apart from first */);
6048 }
6049 sdebug_fake_rw = n;
6050 return count;
6051 }
6052 return -EINVAL;
6053 }
6054 static DRIVER_ATTR_RW(fake_rw);
6055
6056 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6057 {
6058 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6059 }
6060 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6061 size_t count)
6062 {
6063 int n;
6064
6065 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6066 sdebug_no_lun_0 = n;
6067 return count;
6068 }
6069 return -EINVAL;
6070 }
6071 static DRIVER_ATTR_RW(no_lun_0);
6072
6073 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6074 {
6075 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6076 }
6077 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6078 size_t count)
6079 {
6080 int n;
6081
6082 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6083 sdebug_num_tgts = n;
6084 sdebug_max_tgts_luns();
6085 return count;
6086 }
6087 return -EINVAL;
6088 }
6089 static DRIVER_ATTR_RW(num_tgts);
6090
6091 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6092 {
6093 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6094 }
6095 static DRIVER_ATTR_RO(dev_size_mb);
6096
6097 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6098 {
6099 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6100 }
6101
6102 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6103 size_t count)
6104 {
6105 bool v;
6106
6107 if (kstrtobool(buf, &v))
6108 return -EINVAL;
6109
6110 sdebug_per_host_store = v;
6111 return count;
6112 }
6113 static DRIVER_ATTR_RW(per_host_store);
6114
6115 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6116 {
6117 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6118 }
6119 static DRIVER_ATTR_RO(num_parts);
6120
6121 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6122 {
6123 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6124 }
6125 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6126 size_t count)
6127 {
6128 int nth;
6129 char work[20];
6130
6131 if (sscanf(buf, "%10s", work) == 1) {
6132 if (strncasecmp(work, "0x", 2) == 0) {
6133 if (kstrtoint(work + 2, 16, &nth) == 0)
6134 goto every_nth_done;
6135 } else {
6136 if (kstrtoint(work, 10, &nth) == 0)
6137 goto every_nth_done;
6138 }
6139 }
6140 return -EINVAL;
6141
6142 every_nth_done:
6143 sdebug_every_nth = nth;
6144 if (nth && !sdebug_statistics) {
6145 pr_info("every_nth needs statistics=1, set it\n");
6146 sdebug_statistics = true;
6147 }
6148 tweak_cmnd_count();
6149 return count;
6150 }
6151 static DRIVER_ATTR_RW(every_nth);
6152
6153 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6154 {
6155 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6156 }
6157 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6158 size_t count)
6159 {
6160 int n;
6161 bool changed;
6162
6163 if (kstrtoint(buf, 0, &n))
6164 return -EINVAL;
6165 if (n >= 0) {
6166 if (n > (int)SAM_LUN_AM_FLAT) {
6167 pr_warn("only LUN address methods 0 and 1 are supported\n");
6168 return -EINVAL;
6169 }
6170 changed = ((int)sdebug_lun_am != n);
6171 sdebug_lun_am = n;
6172 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6173 struct sdebug_host_info *sdhp;
6174 struct sdebug_dev_info *dp;
6175
6176 spin_lock(&sdebug_host_list_lock);
6177 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6178 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6179 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6180 }
6181 }
6182 spin_unlock(&sdebug_host_list_lock);
6183 }
6184 return count;
6185 }
6186 return -EINVAL;
6187 }
6188 static DRIVER_ATTR_RW(lun_format);
6189
6190 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6191 {
6192 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6193 }
6194 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6195 size_t count)
6196 {
6197 int n;
6198 bool changed;
6199
6200 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6201 if (n > 256) {
6202 pr_warn("max_luns can be no more than 256\n");
6203 return -EINVAL;
6204 }
6205 changed = (sdebug_max_luns != n);
6206 sdebug_max_luns = n;
6207 sdebug_max_tgts_luns();
6208 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6209 struct sdebug_host_info *sdhp;
6210 struct sdebug_dev_info *dp;
6211
6212 spin_lock(&sdebug_host_list_lock);
6213 list_for_each_entry(sdhp, &sdebug_host_list,
6214 host_list) {
6215 list_for_each_entry(dp, &sdhp->dev_info_list,
6216 dev_list) {
6217 set_bit(SDEBUG_UA_LUNS_CHANGED,
6218 dp->uas_bm);
6219 }
6220 }
6221 spin_unlock(&sdebug_host_list_lock);
6222 }
6223 return count;
6224 }
6225 return -EINVAL;
6226 }
6227 static DRIVER_ATTR_RW(max_luns);
6228
6229 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6230 {
6231 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6232 }
6233 /* N.B. max_queue can be changed while there are queued commands. In flight
6234 * commands beyond the new max_queue will be completed. */
6235 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6236 size_t count)
6237 {
6238 int j, n, k, a;
6239 struct sdebug_queue *sqp;
6240
6241 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6242 (n <= SDEBUG_CANQUEUE) &&
6243 (sdebug_host_max_queue == 0)) {
6244 block_unblock_all_queues(true);
6245 k = 0;
6246 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6247 ++j, ++sqp) {
6248 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6249 if (a > k)
6250 k = a;
6251 }
6252 sdebug_max_queue = n;
6253 if (k == SDEBUG_CANQUEUE)
6254 atomic_set(&retired_max_queue, 0);
6255 else if (k >= n)
6256 atomic_set(&retired_max_queue, k + 1);
6257 else
6258 atomic_set(&retired_max_queue, 0);
6259 block_unblock_all_queues(false);
6260 return count;
6261 }
6262 return -EINVAL;
6263 }
6264 static DRIVER_ATTR_RW(max_queue);
6265
6266 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6267 {
6268 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6269 }
6270
6271 /*
6272 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6273 * in range [0, sdebug_host_max_queue), we can't change it.
6274 */
6275 static DRIVER_ATTR_RO(host_max_queue);
6276
6277 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6278 {
6279 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6280 }
6281 static DRIVER_ATTR_RO(no_uld);
6282
6283 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6284 {
6285 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6286 }
6287 static DRIVER_ATTR_RO(scsi_level);
6288
6289 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6290 {
6291 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6292 }
6293 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6294 size_t count)
6295 {
6296 int n;
6297 bool changed;
6298
6299 /* Ignore capacity change for ZBC drives for now */
6300 if (sdeb_zbc_in_use)
6301 return -ENOTSUPP;
6302
6303 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6304 changed = (sdebug_virtual_gb != n);
6305 sdebug_virtual_gb = n;
6306 sdebug_capacity = get_sdebug_capacity();
6307 if (changed) {
6308 struct sdebug_host_info *sdhp;
6309 struct sdebug_dev_info *dp;
6310
6311 spin_lock(&sdebug_host_list_lock);
6312 list_for_each_entry(sdhp, &sdebug_host_list,
6313 host_list) {
6314 list_for_each_entry(dp, &sdhp->dev_info_list,
6315 dev_list) {
6316 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6317 dp->uas_bm);
6318 }
6319 }
6320 spin_unlock(&sdebug_host_list_lock);
6321 }
6322 return count;
6323 }
6324 return -EINVAL;
6325 }
6326 static DRIVER_ATTR_RW(virtual_gb);
6327
6328 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6329 {
6330 /* absolute number of hosts currently active is what is shown */
6331 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6332 }
6333
6334 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6335 size_t count)
6336 {
6337 bool found;
6338 unsigned long idx;
6339 struct sdeb_store_info *sip;
6340 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6341 int delta_hosts;
6342
6343 if (sscanf(buf, "%d", &delta_hosts) != 1)
6344 return -EINVAL;
6345 if (delta_hosts > 0) {
6346 do {
6347 found = false;
6348 if (want_phs) {
6349 xa_for_each_marked(per_store_ap, idx, sip,
6350 SDEB_XA_NOT_IN_USE) {
6351 sdeb_most_recent_idx = (int)idx;
6352 found = true;
6353 break;
6354 }
6355 if (found) /* re-use case */
6356 sdebug_add_host_helper((int)idx);
6357 else
6358 sdebug_do_add_host(true);
6359 } else {
6360 sdebug_do_add_host(false);
6361 }
6362 } while (--delta_hosts);
6363 } else if (delta_hosts < 0) {
6364 do {
6365 sdebug_do_remove_host(false);
6366 } while (++delta_hosts);
6367 }
6368 return count;
6369 }
6370 static DRIVER_ATTR_RW(add_host);
6371
6372 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6373 {
6374 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6375 }
6376 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6377 size_t count)
6378 {
6379 int n;
6380
6381 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6382 sdebug_vpd_use_hostno = n;
6383 return count;
6384 }
6385 return -EINVAL;
6386 }
6387 static DRIVER_ATTR_RW(vpd_use_hostno);
6388
6389 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6390 {
6391 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6392 }
6393 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6394 size_t count)
6395 {
6396 int n;
6397
6398 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6399 if (n > 0)
6400 sdebug_statistics = true;
6401 else {
6402 clear_queue_stats();
6403 sdebug_statistics = false;
6404 }
6405 return count;
6406 }
6407 return -EINVAL;
6408 }
6409 static DRIVER_ATTR_RW(statistics);
6410
6411 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6412 {
6413 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6414 }
6415 static DRIVER_ATTR_RO(sector_size);
6416
6417 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6418 {
6419 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6420 }
6421 static DRIVER_ATTR_RO(submit_queues);
6422
6423 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6424 {
6425 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6426 }
6427 static DRIVER_ATTR_RO(dix);
6428
6429 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6430 {
6431 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6432 }
6433 static DRIVER_ATTR_RO(dif);
6434
6435 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6436 {
6437 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6438 }
6439 static DRIVER_ATTR_RO(guard);
6440
6441 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6442 {
6443 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6444 }
6445 static DRIVER_ATTR_RO(ato);
6446
6447 static ssize_t map_show(struct device_driver *ddp, char *buf)
6448 {
6449 ssize_t count = 0;
6450
6451 if (!scsi_debug_lbp())
6452 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6453 sdebug_store_sectors);
6454
6455 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6456 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6457
6458 if (sip)
6459 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6460 (int)map_size, sip->map_storep);
6461 }
6462 buf[count++] = '\n';
6463 buf[count] = '\0';
6464
6465 return count;
6466 }
6467 static DRIVER_ATTR_RO(map);
6468
6469 static ssize_t random_show(struct device_driver *ddp, char *buf)
6470 {
6471 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6472 }
6473
6474 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6475 size_t count)
6476 {
6477 bool v;
6478
6479 if (kstrtobool(buf, &v))
6480 return -EINVAL;
6481
6482 sdebug_random = v;
6483 return count;
6484 }
6485 static DRIVER_ATTR_RW(random);
6486
6487 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6488 {
6489 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6490 }
6491 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6492 size_t count)
6493 {
6494 int n;
6495
6496 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6497 sdebug_removable = (n > 0);
6498 return count;
6499 }
6500 return -EINVAL;
6501 }
6502 static DRIVER_ATTR_RW(removable);
6503
6504 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6505 {
6506 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6507 }
6508 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6509 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6510 size_t count)
6511 {
6512 int n;
6513
6514 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6515 sdebug_host_lock = (n > 0);
6516 return count;
6517 }
6518 return -EINVAL;
6519 }
6520 static DRIVER_ATTR_RW(host_lock);
6521
6522 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6523 {
6524 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6525 }
6526 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6527 size_t count)
6528 {
6529 int n;
6530
6531 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6532 sdebug_strict = (n > 0);
6533 return count;
6534 }
6535 return -EINVAL;
6536 }
6537 static DRIVER_ATTR_RW(strict);
6538
uuid_ctl_show(struct device_driver * ddp,char * buf)6539 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6540 {
6541 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6542 }
6543 static DRIVER_ATTR_RO(uuid_ctl);
6544
cdb_len_show(struct device_driver * ddp,char * buf)6545 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6546 {
6547 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6548 }
cdb_len_store(struct device_driver * ddp,const char * buf,size_t count)6549 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6550 size_t count)
6551 {
6552 int ret, n;
6553
6554 ret = kstrtoint(buf, 0, &n);
6555 if (ret)
6556 return ret;
6557 sdebug_cdb_len = n;
6558 all_config_cdb_len();
6559 return count;
6560 }
6561 static DRIVER_ATTR_RW(cdb_len);
6562
6563 static const char * const zbc_model_strs_a[] = {
6564 [BLK_ZONED_NONE] = "none",
6565 [BLK_ZONED_HA] = "host-aware",
6566 [BLK_ZONED_HM] = "host-managed",
6567 };
6568
6569 static const char * const zbc_model_strs_b[] = {
6570 [BLK_ZONED_NONE] = "no",
6571 [BLK_ZONED_HA] = "aware",
6572 [BLK_ZONED_HM] = "managed",
6573 };
6574
6575 static const char * const zbc_model_strs_c[] = {
6576 [BLK_ZONED_NONE] = "0",
6577 [BLK_ZONED_HA] = "1",
6578 [BLK_ZONED_HM] = "2",
6579 };
6580
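/*
 * Accept any of the three spellings for each ZBC model: "none"/"no"/"0",
 * "host-aware"/"aware"/"1" or "host-managed"/"managed"/"2". The index
 * returned on a match is the corresponding BLK_ZONED_* enum value.
 */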
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/*
 * Note: The following array creates attribute files in the
 * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
 * files (over those found in the /sys/module/scsi_debug/parameters
 * directory) is that auxiliary actions can be triggered when an attribute
 * is changed. For example see: add_host_store() above.
 */
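/*
 * A typical interaction from user space (illustrative example):
 *
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * invokes the matching _store() callback (here add_host_store()), which
 * can do more than just record the new value (e.g. build another host).
 */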

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;
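	/*
	 * A valid ndelay (nanosecond scale delay) takes precedence over
	 * jdelay (jiffies scale); JDELAY_OVERRIDDEN records that state.
	 */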

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, using default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0, %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;	/* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();
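	/*
	 * Worked example, assuming the compiled-in defaults of an 8 MB
	 * store and 512 byte sectors: sz = 8 * 1048576 = 8388608 bytes,
	 * so sdebug_store_sectors = 8388608 / 512 = 16384 sectors.
	 */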

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
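	/*
	 * The clamped provisioning limits above are what this driver later
	 * advertises to initiators (e.g. via its Block Limits VPD page
	 * response elsewhere in this file); the unmap alignment must be
	 * strictly less than the granularity, hence the error above.
	 */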
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid; if sip is NULL it will be looked up using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		spin_lock(&sdebug_host_list_lock);
		list_del(&sdbg_host->host_list);
		spin_unlock(&sdebug_host_list_lock);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
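	/*
	 * Unless this is the module-exit path, check whether any remaining
	 * host still references the departing host's backing store; if not,
	 * mark that store SDEB_XA_NOT_IN_USE so a later add_host can
	 * reclaim it (see sdebug_add_host_helper()).
	 */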
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow qdepth to exceed the max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}

static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer has expired */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];	/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

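		/*
		 * len_mask[0] holds the expected CDB length; len_mask[k]
		 * has a 1-bit for each bit of CDB byte k that may be set.
		 * rem therefore collects bits that should have been zero,
		 * and the inner loop locates the most significant offending
		 * bit so it can be reported in the sense data field pointer.
		 */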
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

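		/*
		 * Worked example, assuming the typical USER_HZ of 100 and
		 * HZ of 250: with sdebug_jdelay=5, an SSU gets
		 * mult_frac(5 * 100, 250, 1 * 100) = 1250 jiffies (5 s),
		 * while SYNCHRONIZE CACHE gets 1/20 of that (62 jiffies).
		 */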
		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
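	/*
	 * max_lun is set just past SCSI_W_LUN_REPORT_LUNS (0xc101) so that
	 * the REPORT LUNS well-known LUN can be probed, not only LUNs
	 * below sdebug_max_luns.
	 */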
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;
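	/* map the dif/dix module parameters to SHOST_* protection flags */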

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

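/* the pseudo bus matches any device; probe then does the real work */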
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
