// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
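
/*
 * Worked example of the limits above (illustrative only; the 2:0:0:0
 * device address below is hypothetical): on a 64-bit build SDEBUG_CANQUEUE
 * is 3 * 64 = 192. A device's queue depth can be lowered at run time, but
 * never raised past that ceiling, with something like:
 *
 *	echo 4 > /sys/class/scsi_device/2:0:0:0/device/queue_depth
 */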

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
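
/*
 * Usage sketch (the 'dev' pointer name is illustrative): code holding the
 * struct device embedded above can recover its owning host object with:
 *
 *	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);
 */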

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
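
/*
 * A worked reading of the tables that follow (no new behavior implied):
 * in the INQUIRY entry {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} below,
 * len_mask[0] = 6 gives the cdb length and len_mask[1] = 0xe3 says only
 * bits 7..5, 1 and 0 of cdb[1] may be set; with the strict module
 * parameter active, a set bit outside a mask is rejected with ILLEGAL
 * REQUEST, INVALID FIELD IN CDB.
 */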

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	    SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
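
/*
 * Minimal sketch of that convention ('res' stands for whatever status a
 * response function has computed): a handler that accepted an IMMED bit
 * can request early completion with
 *
 *	return res | SDEG_RES_IMMED_MASK;
 */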

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
		   0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
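
/*
 * For reference, an illustrative way to exercise the switch above:
 * cdb_len is the module parameter backing sdebug_cdb_len, so loading
 * the driver with
 *
 *	modprobe scsi_debug cdb_len=16
 *
 * steers READ/WRITE on these devices to the 16 byte cdb variants.
 */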

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
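
/*
 * Example of the contract above with hypothetical numbers: with
 * scsi_bufflen() == 1024 and resid preset to 1024, writing 512 bytes at
 * off_dst 0 drops resid to 512, and a later 256 byte write at off_dst 512
 * drops it to 256; the min_t() keeps resid correct even when the calls
 * arrive in descending offset order.
 */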

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
1335 memcpy(arr + num, na1, olen);
1336 memset(arr + num + olen, 0, plen - olen);
1337 num += plen;
1338
1339 arr[num++] = 0x4; /* lu, logging */
1340 arr[num++] = 0x0; /* reserved */
1341 arr[num++] = 0x0;
1342 olen = strlen(na2);
1343 plen = olen + 1;
1344 if (plen % 4)
1345 plen = ((plen / 4) + 1) * 4;
1346 arr[num++] = plen; /* length, null terminated, padded */
1347 memcpy(arr + num, na2, olen);
1348 memset(arr + num + olen, 0, plen - olen);
1349 num += plen;
1350
1351 return num;
1352 }
1353
1354 /* SCSI ports VPD page */
inquiry_vpd_88(unsigned char * arr,int target_dev_id)1355 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1356 {
1357 int num = 0;
1358 int port_a, port_b;
1359
1360 port_a = target_dev_id + 1;
1361 port_b = port_a + 1;
1362 arr[num++] = 0x0; /* reserved */
1363 arr[num++] = 0x0; /* reserved */
1364 arr[num++] = 0x0;
1365 arr[num++] = 0x1; /* relative port 1 (primary) */
1366 memset(arr + num, 0, 6);
1367 num += 6;
1368 arr[num++] = 0x0;
1369 arr[num++] = 12; /* length tp descriptor */
1370 /* naa-5 target port identifier (A) */
1371 arr[num++] = 0x61; /* proto=sas, binary */
1372 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1373 arr[num++] = 0x0; /* reserved */
1374 arr[num++] = 0x8; /* length */
1375 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1376 num += 8;
1377 arr[num++] = 0x0; /* reserved */
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x0;
1380 arr[num++] = 0x2; /* relative port 2 (secondary) */
1381 memset(arr + num, 0, 6);
1382 num += 6;
1383 arr[num++] = 0x0;
1384 arr[num++] = 12; /* length tp descriptor */
1385 /* naa-5 target port identifier (B) */
1386 arr[num++] = 0x61; /* proto=sas, binary */
1387 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1388 arr[num++] = 0x0; /* reserved */
1389 arr[num++] = 0x8; /* length */
1390 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1391 num += 8;
1392
1393 return num;
1394 }
1395
1396
1397 static unsigned char vpd89_data[] = {
1398 /* from 4th byte */ 0,0,0,0,
1399 'l','i','n','u','x',' ',' ',' ',
1400 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1401 '1','2','3','4',
1402 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1403 0xec,0,0,0,
1404 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1405 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1406 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1407 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1408 0x53,0x41,
1409 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1410 0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1412 0x10,0x80,
1413 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1414 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1415 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1416 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1417 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1418 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1419 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1420 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1424 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1425 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1426 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1439 };
1440
1441 /* ATA Information VPD page */
inquiry_vpd_89(unsigned char * arr)1442 static int inquiry_vpd_89(unsigned char *arr)
1443 {
1444 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1445 return sizeof(vpd89_data);
1446 }
1447
1448
1449 static unsigned char vpdb0_data[] = {
1450 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 };
1455
1456 /* Block limits VPD page (SBC-3) */
1457 static int inquiry_vpd_b0(unsigned char *arr)
1458 {
1459 unsigned int gran;
1460
1461 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1462
1463 /* Optimal transfer length granularity */
1464 if (sdebug_opt_xferlen_exp != 0 &&
1465 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1466 gran = 1 << sdebug_opt_xferlen_exp;
1467 else
1468 gran = 1 << sdebug_physblk_exp;
1469 put_unaligned_be16(gran, arr + 2);
1470
1471 /* Maximum Transfer Length */
1472 if (sdebug_store_sectors > 0x400)
1473 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1474
1475 /* Optimal Transfer Length */
1476 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1477
1478 if (sdebug_lbpu) {
1479 /* Maximum Unmap LBA Count */
1480 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1481
1482 /* Maximum Unmap Block Descriptor Count */
1483 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1484 }
1485
1486 /* Unmap Granularity Alignment */
1487 if (sdebug_unmap_alignment) {
1488 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1489 arr[28] |= 0x80; /* UGAVALID */
1490 }
1491
1492 /* Optimal Unmap Granularity */
1493 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1494
1495 /* Maximum WRITE SAME Length */
1496 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1497
1498 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 }
1502
1503 /* Block device characteristics VPD page (SBC-3) */
1504 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1505 {
1506 memset(arr, 0, 0x3c);
1507 arr[0] = 0;
1508 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1509 arr[2] = 0;
1510 arr[3] = 5; /* less than 1.8" */
1511 if (devip->zmodel == BLK_ZONED_HA)
1512 arr[4] = 1 << 4; /* zoned field = 01b */
1513
1514 return 0x3c;
1515 }
1516
1517 /* Logical block provisioning VPD page (SBC-4) */
1518 static int inquiry_vpd_b2(unsigned char *arr)
1519 {
1520 memset(arr, 0, 0x4);
1521 arr[0] = 0; /* threshold exponent */
1522 if (sdebug_lbpu)
1523 arr[1] = 1 << 7;
1524 if (sdebug_lbpws)
1525 arr[1] |= 1 << 6;
1526 if (sdebug_lbpws10)
1527 arr[1] |= 1 << 5;
1528 if (sdebug_lbprz && scsi_debug_lbp())
1529 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1530 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1531 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1532 /* threshold_percentage=0 */
1533 return 0x4;
1534 }
1535
1536 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1537 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1538 {
1539 memset(arr, 0, 0x3c);
1540 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1541 /*
1542 * Set Optimal number of open sequential write preferred zones and
1543 * Optimal number of non-sequentially written sequential write
1544 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1545 * fields set to zero, apart from Max. number of open swrz_s field.
1546 */
1547 put_unaligned_be32(0xffffffff, &arr[4]);
1548 put_unaligned_be32(0xffffffff, &arr[8]);
1549 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1550 put_unaligned_be32(devip->max_open, &arr[12]);
1551 else
1552 put_unaligned_be32(0xffffffff, &arr[12]);
1553 return 0x3c;
1554 }
1555
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1558
1559 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1560 {
1561 unsigned char pq_pdt;
1562 unsigned char *arr;
1563 unsigned char *cmd = scp->cmnd;
1564 u32 alloc_len, n;
1565 int ret;
1566 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1567
1568 alloc_len = get_unaligned_be16(cmd + 3);
1569 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1570 if (!arr)
1571 return DID_REQUEUE << 16;
1572 is_disk = (sdebug_ptype == TYPE_DISK);
1573 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1574 is_disk_zbc = (is_disk || is_zbc);
1575 have_wlun = scsi_is_wlun(scp->device->lun);
1576 if (have_wlun)
1577 pq_pdt = TYPE_WLUN; /* present, wlun */
1578 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1579 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1580 else
1581 pq_pdt = (sdebug_ptype & 0x1f);
1582 arr[0] = pq_pdt;
1583 if (0x2 & cmd[1]) { /* CMDDT bit set */
1584 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1585 kfree(arr);
1586 return check_condition_result;
1587 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1588 int lu_id_num, port_group_id, target_dev_id;
1589 u32 len;
1590 char lu_id_str[6];
1591 int host_no = devip->sdbg_host->shost->host_no;
1592
1593 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1594 (devip->channel & 0x7f);
1595 if (sdebug_vpd_use_hostno == 0)
1596 host_no = 0;
1597 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1598 (devip->target * 1000) + devip->lun);
1599 target_dev_id = ((host_no + 1) * 2000) +
1600 (devip->target * 1000) - 3;
1601 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1602 if (0 == cmd[2]) { /* supported vital product data pages */
1603 arr[1] = cmd[2]; /* sanity */
1604 n = 4;
1605 arr[n++] = 0x0; /* this page */
1606 arr[n++] = 0x80; /* unit serial number */
1607 arr[n++] = 0x83; /* device identification */
1608 arr[n++] = 0x84; /* software interface ident. */
1609 arr[n++] = 0x85; /* management network addresses */
1610 arr[n++] = 0x86; /* extended inquiry */
1611 arr[n++] = 0x87; /* mode page policy */
1612 arr[n++] = 0x88; /* SCSI ports */
1613 if (is_disk_zbc) { /* SBC or ZBC */
1614 arr[n++] = 0x89; /* ATA information */
1615 arr[n++] = 0xb0; /* Block limits */
1616 arr[n++] = 0xb1; /* Block characteristics */
1617 if (is_disk)
1618 arr[n++] = 0xb2; /* LB Provisioning */
1619 if (is_zbc)
1620 arr[n++] = 0xb6; /* ZB dev. char. */
1621 }
1622 arr[3] = n - 4; /* number of supported VPD pages */
1623 } else if (0x80 == cmd[2]) { /* unit serial number */
1624 arr[1] = cmd[2]; /* sanity */
1625 arr[3] = len;
1626 memcpy(&arr[4], lu_id_str, len);
1627 } else if (0x83 == cmd[2]) { /* device identification */
1628 arr[1] = cmd[2]; /* sanity */
1629 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1630 target_dev_id, lu_id_num,
1631 lu_id_str, len,
1632 &devip->lu_name);
1633 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1634 arr[1] = cmd[2]; /* sanity */
1635 arr[3] = inquiry_vpd_84(&arr[4]);
1636 } else if (0x85 == cmd[2]) { /* Management network addresses */
1637 arr[1] = cmd[2]; /* sanity */
1638 arr[3] = inquiry_vpd_85(&arr[4]);
1639 } else if (0x86 == cmd[2]) { /* extended inquiry */
1640 arr[1] = cmd[2]; /* sanity */
1641 arr[3] = 0x3c; /* number of following entries */
1642 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1643 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1644 else if (have_dif_prot)
1645 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1646 else
1647 arr[4] = 0x0; /* no protection stuff */
1648 arr[5] = 0x7; /* head of q, ordered + simple q's */
1649 } else if (0x87 == cmd[2]) { /* mode page policy */
1650 arr[1] = cmd[2]; /* sanity */
1651 arr[3] = 0x8; /* number of following entries */
1652 arr[4] = 0x2; /* disconnect-reconnect mp */
1653 arr[6] = 0x80; /* mlus, shared */
1654 arr[8] = 0x18; /* protocol specific lu */
1655 arr[10] = 0x82; /* mlus, per initiator port */
1656 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1657 arr[1] = cmd[2]; /* sanity */
1658 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1659 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1660 arr[1] = cmd[2]; /* sanity */
1661 n = inquiry_vpd_89(&arr[4]);
1662 put_unaligned_be16(n, arr + 2);
1663 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1664 arr[1] = cmd[2]; /* sanity */
1665 arr[3] = inquiry_vpd_b0(&arr[4]);
1666 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1667 arr[1] = cmd[2]; /* sanity */
1668 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1669 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1670 arr[1] = cmd[2]; /* sanity */
1671 arr[3] = inquiry_vpd_b2(&arr[4]);
1672 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1673 arr[1] = cmd[2]; /* sanity */
1674 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1675 } else {
1676 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1677 kfree(arr);
1678 return check_condition_result;
1679 }
1680 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1681 ret = fill_from_dev_buffer(scp, arr,
1682 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1683 kfree(arr);
1684 return ret;
1685 }
1686 /* drops through here for a standard inquiry */
1687 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1688 arr[2] = sdebug_scsi_level;
1689 arr[3] = 2; /* response_data_format==2 */
1690 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1691 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1692 if (sdebug_vpd_use_hostno == 0)
1693 arr[5] |= 0x10; /* claim: implicit TPGS */
1694 arr[6] = 0x10; /* claim: MultiP */
1695 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1696 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1697 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1698 memcpy(&arr[16], sdebug_inq_product_id, 16);
1699 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1700 /* Use Vendor Specific area to place driver date in ASCII */
1701 memcpy(&arr[36], sdebug_version_date, 8);
1702 /* version descriptors (2 bytes each) follow */
1703 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1704 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1705 n = 62;
1706 if (is_disk) { /* SBC-4 no version claimed */
1707 put_unaligned_be16(0x600, arr + n);
1708 n += 2;
1709 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1710 put_unaligned_be16(0x525, arr + n);
1711 n += 2;
1712 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1713 put_unaligned_be16(0x624, arr + n);
1714 n += 2;
1715 }
1716 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1717 ret = fill_from_dev_buffer(scp, arr,
1718 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1719 kfree(arr);
1720 return ret;
1721 }
1722
1723 /* See resp_iec_m_pg() for how this data is manipulated */
1724 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1725 0, 0, 0x0, 0x0};
1726
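/*
 * REQUEST SENSE. Returns descriptor (0x72) or fixed (0x70) format sense
 * data depending on the DESC bit in the cdb. Reports a not ready
 * condition while the unit is stopped, a faked "threshold exceeded"
 * while the informational exceptions mode page has TEST=1 and MRIE=6,
 * and an empty "no sense" response otherwise.
 */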
1727 static int resp_requests(struct scsi_cmnd *scp,
1728 struct sdebug_dev_info *devip)
1729 {
1730 unsigned char *cmd = scp->cmnd;
1731 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1732 bool dsense = !!(cmd[1] & 1);
1733 u32 alloc_len = cmd[4];
1734 u32 len = 18;
1735 int stopped_state = atomic_read(&devip->stopped);
1736
1737 memset(arr, 0, sizeof(arr));
1738 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1739 if (dsense) {
1740 arr[0] = 0x72;
1741 arr[1] = NOT_READY;
1742 arr[2] = LOGICAL_UNIT_NOT_READY;
1743 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1744 len = 8;
1745 } else {
1746 arr[0] = 0x70;
1747 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1748 arr[7] = 0xa; /* 18 byte sense buffer */
1749 arr[12] = LOGICAL_UNIT_NOT_READY;
1750 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1751 }
1752 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1753 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1754 if (dsense) {
1755 arr[0] = 0x72;
1756 arr[1] = 0x0; /* NO_SENSE in sense_key */
1757 arr[2] = THRESHOLD_EXCEEDED;
1758 arr[3] = 0xff; /* Failure prediction(false) */
1759 len = 8;
1760 } else {
1761 arr[0] = 0x70;
1762 arr[2] = 0x0; /* NO_SENSE in sense_key */
1763 arr[7] = 0xa; /* 18 byte sense buffer */
1764 arr[12] = THRESHOLD_EXCEEDED;
1765 arr[13] = 0xff; /* Failure prediction(false) */
1766 }
1767 } else { /* nothing to report */
1768 if (dsense) {
1769 len = 8;
1770 memset(arr, 0, len);
1771 arr[0] = 0x72;
1772 } else {
1773 memset(arr, 0, len);
1774 arr[0] = 0x70;
1775 arr[7] = 0xa;
1776 }
1777 }
1778 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1779 }
1780
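/*
 * START STOP UNIT. Only power condition 0 (START_VALID) is accepted.
 * While the optional tur_ms_to_ready timer is running (stopped state 2)
 * the START bit may not be used to force the unit ready early.
 */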
1781 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1782 {
1783 unsigned char *cmd = scp->cmnd;
1784 int power_cond, want_stop, stopped_state;
1785 bool changing;
1786
1787 power_cond = (cmd[4] & 0xf0) >> 4;
1788 if (power_cond) {
1789 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1790 return check_condition_result;
1791 }
1792 want_stop = !(cmd[4] & 1);
1793 stopped_state = atomic_read(&devip->stopped);
1794 if (stopped_state == 2) {
1795 ktime_t now_ts = ktime_get_boottime();
1796
1797 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1798 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1799
1800 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1801 /* tur_ms_to_ready timer extinguished */
1802 atomic_set(&devip->stopped, 0);
1803 stopped_state = 0;
1804 }
1805 }
1806 if (stopped_state == 2) {
1807 if (want_stop) {
1808 stopped_state = 1; /* dummy up success */
1809 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1810 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1811 return check_condition_result;
1812 }
1813 }
1814 }
1815 changing = (stopped_state != want_stop);
1816 if (changing)
1817 atomic_xchg(&devip->stopped, want_stop);
1818 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1819 return SDEG_RES_IMMED_MASK;
1820 else
1821 return 0;
1822 }
1823
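/*
 * Capacity in sectors. A non-zero virtual_gb parameter overrides the
 * size of the backing store, so the reported capacity may exceed
 * sdebug_store_sectors; accesses beyond the store wrap back to its
 * start (see do_device_access()).
 */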
1824 static sector_t get_sdebug_capacity(void)
1825 {
1826 static const unsigned int gibibyte = 1073741824;
1827
1828 if (sdebug_virtual_gb > 0)
1829 return (sector_t)sdebug_virtual_gb *
1830 (gibibyte / sdebug_sector_size);
1831 else
1832 return sdebug_store_sectors;
1833 }
1834
1835 #define SDEBUG_READCAP_ARR_SZ 8
1836 static int resp_readcap(struct scsi_cmnd *scp,
1837 struct sdebug_dev_info *devip)
1838 {
1839 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1840 unsigned int capac;
1841
1842 /* following just in case virtual_gb changed */
1843 sdebug_capacity = get_sdebug_capacity();
1844 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1845 if (sdebug_capacity < 0xffffffff) {
1846 capac = (unsigned int)sdebug_capacity - 1;
1847 put_unaligned_be32(capac, arr + 0);
1848 } else
1849 put_unaligned_be32(0xffffffff, arr + 0);
1850 put_unaligned_be16(sdebug_sector_size, arr + 6);
1851 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1852 }
1853
1854 #define SDEBUG_READCAP16_ARR_SZ 32
1855 static int resp_readcap16(struct scsi_cmnd *scp,
1856 struct sdebug_dev_info *devip)
1857 {
1858 unsigned char *cmd = scp->cmnd;
1859 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1860 u32 alloc_len;
1861
1862 alloc_len = get_unaligned_be32(cmd + 10);
1863 /* following just in case virtual_gb changed */
1864 sdebug_capacity = get_sdebug_capacity();
1865 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1866 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1867 put_unaligned_be32(sdebug_sector_size, arr + 8);
1868 arr[13] = sdebug_physblk_exp & 0xf;
1869 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1870
1871 if (scsi_debug_lbp()) {
1872 arr[14] |= 0x80; /* LBPME */
1873 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1874 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1875 * in the wider field maps to 0 in this field.
1876 */
1877 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1878 arr[14] |= 0x40;
1879 }
1880
1881 arr[15] = sdebug_lowest_aligned & 0xff;
1882
1883 if (have_dif_prot) {
1884 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1885 arr[12] |= 1; /* PROT_EN */
1886 }
1887
1888 return fill_from_dev_buffer(scp, arr,
1889 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1890 }
1891
1892 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1893
1894 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1895 struct sdebug_dev_info *devip)
1896 {
1897 unsigned char *cmd = scp->cmnd;
1898 unsigned char *arr;
1899 int host_no = devip->sdbg_host->shost->host_no;
1900 int port_group_a, port_group_b, port_a, port_b;
1901 u32 alen, n, rlen;
1902 int ret;
1903
1904 alen = get_unaligned_be32(cmd + 6);
1905 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1906 if (!arr)
1907 return DID_REQUEUE << 16;
1908 /*
1909 * EVPD page 0x88 states we have two ports, one
1910 * real and a fake port with no device connected.
1911 * So we create two port groups with one port each
1912 * and set the group with port B to unavailable.
1913 */
1914 port_a = 0x1; /* relative port A */
1915 port_b = 0x2; /* relative port B */
1916 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1917 (devip->channel & 0x7f);
1918 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1919 (devip->channel & 0x7f) + 0x80;
1920
1921 /*
1922 * The asymmetric access state is cycled according to the host_no.
1923 */
1924 n = 4;
1925 if (sdebug_vpd_use_hostno == 0) {
1926 arr[n++] = host_no % 3; /* Asymm access state */
1927 arr[n++] = 0x0F; /* claim: all states are supported */
1928 } else {
1929 arr[n++] = 0x0; /* Active/Optimized path */
1930 arr[n++] = 0x01; /* only support active/optimized paths */
1931 }
1932 put_unaligned_be16(port_group_a, arr + n);
1933 n += 2;
1934 arr[n++] = 0; /* Reserved */
1935 arr[n++] = 0; /* Status code */
1936 arr[n++] = 0; /* Vendor unique */
1937 arr[n++] = 0x1; /* One port per group */
1938 arr[n++] = 0; /* Reserved */
1939 arr[n++] = 0; /* Reserved */
1940 put_unaligned_be16(port_a, arr + n);
1941 n += 2;
1942 arr[n++] = 3; /* Port unavailable */
1943 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1944 put_unaligned_be16(port_group_b, arr + n);
1945 n += 2;
1946 arr[n++] = 0; /* Reserved */
1947 arr[n++] = 0; /* Status code */
1948 arr[n++] = 0; /* Vendor unique */
1949 arr[n++] = 0x1; /* One port per group */
1950 arr[n++] = 0; /* Reserved */
1951 arr[n++] = 0; /* Reserved */
1952 put_unaligned_be16(port_b, arr + n);
1953 n += 2;
1954
1955 rlen = n - 4;
1956 put_unaligned_be32(rlen, arr + 0);
1957
1958 /*
1959 * Return the smallest of:
1960 * - the allocated length (alen, from the cdb)
1961 * - the constructed response length
1962 * - the maximum array size
1963 */
1964 rlen = min(alen, n);
1965 ret = fill_from_dev_buffer(scp, arr,
1966 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1967 kfree(arr);
1968 return ret;
1969 }
1970
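/*
 * REPORT SUPPORTED OPERATION CODES. Implements reporting options 0 (all
 * commands), 1 (one opcode), 2 (opcode + service action) and 3 (opcode,
 * plus service action if the opcode has one). When RCTD is set a 12
 * byte command timeouts descriptor is appended per command.
 */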
1971 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1972 struct sdebug_dev_info *devip)
1973 {
1974 bool rctd;
1975 u8 reporting_opts, req_opcode, sdeb_i, supp;
1976 u16 req_sa, u;
1977 u32 alloc_len, a_len;
1978 int k, offset, len, errsts, count, bump, na;
1979 const struct opcode_info_t *oip;
1980 const struct opcode_info_t *r_oip;
1981 u8 *arr;
1982 u8 *cmd = scp->cmnd;
1983
1984 rctd = !!(cmd[2] & 0x80);
1985 reporting_opts = cmd[2] & 0x7;
1986 req_opcode = cmd[3];
1987 req_sa = get_unaligned_be16(cmd + 4);
1988 alloc_len = get_unaligned_be32(cmd + 6);
1989 if (alloc_len < 4 || alloc_len > 0xffff) {
1990 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1991 return check_condition_result;
1992 }
1993 if (alloc_len > 8192)
1994 a_len = 8192;
1995 else
1996 a_len = alloc_len;
1997 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1998 if (NULL == arr) {
1999 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2000 INSUFF_RES_ASCQ);
2001 return check_condition_result;
2002 }
2003 switch (reporting_opts) {
2004 case 0: /* all commands */
2005 /* count number of commands */
2006 for (count = 0, oip = opcode_info_arr;
2007 oip->num_attached != 0xff; ++oip) {
2008 if (F_INV_OP & oip->flags)
2009 continue;
2010 count += (oip->num_attached + 1);
2011 }
2012 bump = rctd ? 20 : 8;
2013 put_unaligned_be32(count * bump, arr);
2014 for (offset = 4, oip = opcode_info_arr;
2015 oip->num_attached != 0xff && offset < a_len; ++oip) {
2016 if (F_INV_OP & oip->flags)
2017 continue;
2018 na = oip->num_attached;
2019 arr[offset] = oip->opcode;
2020 put_unaligned_be16(oip->sa, arr + offset + 2);
2021 if (rctd)
2022 arr[offset + 5] |= 0x2;
2023 if (FF_SA & oip->flags)
2024 arr[offset + 5] |= 0x1;
2025 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2026 if (rctd)
2027 put_unaligned_be16(0xa, arr + offset + 8);
2028 r_oip = oip;
2029 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2030 if (F_INV_OP & oip->flags)
2031 continue;
2032 offset += bump;
2033 arr[offset] = oip->opcode;
2034 put_unaligned_be16(oip->sa, arr + offset + 2);
2035 if (rctd)
2036 arr[offset + 5] |= 0x2;
2037 if (FF_SA & oip->flags)
2038 arr[offset + 5] |= 0x1;
2039 put_unaligned_be16(oip->len_mask[0],
2040 arr + offset + 6);
2041 if (rctd)
2042 put_unaligned_be16(0xa,
2043 arr + offset + 8);
2044 }
2045 oip = r_oip;
2046 offset += bump;
2047 }
2048 break;
2049 case 1: /* one command: opcode only */
2050 case 2: /* one command: opcode plus service action */
2051 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2052 sdeb_i = opcode_ind_arr[req_opcode];
2053 oip = &opcode_info_arr[sdeb_i];
2054 if (F_INV_OP & oip->flags) {
2055 supp = 1;
2056 offset = 4;
2057 } else {
2058 if (1 == reporting_opts) {
2059 if (FF_SA & oip->flags) {
2060 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2061 2, 2);
2062 kfree(arr);
2063 return check_condition_result;
2064 }
2065 req_sa = 0;
2066 } else if (2 == reporting_opts &&
2067 0 == (FF_SA & oip->flags)) {
2068 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2069 kfree(arr);
2070 return check_condition_result;
2071 }
2072 if (0 == (FF_SA & oip->flags) &&
2073 req_opcode == oip->opcode)
2074 supp = 3;
2075 else if (0 == (FF_SA & oip->flags)) {
2076 na = oip->num_attached;
2077 for (k = 0, oip = oip->arrp; k < na;
2078 ++k, ++oip) {
2079 if (req_opcode == oip->opcode)
2080 break;
2081 }
2082 supp = (k >= na) ? 1 : 3;
2083 } else if (req_sa != oip->sa) {
2084 na = oip->num_attached;
2085 for (k = 0, oip = oip->arrp; k < na;
2086 ++k, ++oip) {
2087 if (req_sa == oip->sa)
2088 break;
2089 }
2090 supp = (k >= na) ? 1 : 3;
2091 } else
2092 supp = 3;
2093 if (3 == supp) {
2094 u = oip->len_mask[0];
2095 put_unaligned_be16(u, arr + 2);
2096 arr[4] = oip->opcode;
2097 for (k = 1; k < u; ++k)
2098 arr[4 + k] = (k < 16) ?
2099 oip->len_mask[k] : 0xff;
2100 offset = 4 + u;
2101 } else
2102 offset = 4;
2103 }
2104 arr[1] = (rctd ? 0x80 : 0) | supp;
2105 if (rctd) {
2106 put_unaligned_be16(0xa, arr + offset);
2107 offset += 12;
2108 }
2109 break;
2110 default:
2111 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2112 kfree(arr);
2113 return check_condition_result;
2114 }
2115 offset = (offset < a_len) ? offset : a_len;
2116 len = (offset < alloc_len) ? offset : alloc_len;
2117 errsts = fill_from_dev_buffer(scp, arr, len);
2118 kfree(arr);
2119 return errsts;
2120 }
2121
2122 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2123 struct sdebug_dev_info *devip)
2124 {
2125 bool repd;
2126 u32 alloc_len, len;
2127 u8 arr[16];
2128 u8 *cmd = scp->cmnd;
2129
2130 memset(arr, 0, sizeof(arr));
2131 repd = !!(cmd[2] & 0x80);
2132 alloc_len = get_unaligned_be32(cmd + 6);
2133 if (alloc_len < 4) {
2134 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2135 return check_condition_result;
2136 }
2137 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2138 arr[1] = 0x1; /* ITNRS */
2139 if (repd) {
2140 arr[3] = 0xc;
2141 len = 16;
2142 } else
2143 len = 4;
2144
2145 len = (len < alloc_len) ? len : alloc_len;
2146 return fill_from_dev_buffer(scp, arr, len);
2147 }
2148
2149 /* <<Following mode page info copied from ST318451LW>> */
2150
2151 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2152 { /* Read-Write Error Recovery page for mode_sense */
2153 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2154 5, 0, 0xff, 0xff};
2155
2156 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2157 if (1 == pcontrol)
2158 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2159 return sizeof(err_recov_pg);
2160 }
2161
2162 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2163 { /* Disconnect-Reconnect page for mode_sense */
2164 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2165 0, 0, 0, 0, 0, 0, 0, 0};
2166
2167 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2168 if (1 == pcontrol)
2169 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2170 return sizeof(disconnect_pg);
2171 }
2172
2173 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2174 { /* Format device page for mode_sense */
2175 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2176 0, 0, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0x40, 0, 0, 0};
2178
2179 memcpy(p, format_pg, sizeof(format_pg));
2180 put_unaligned_be16(sdebug_sectors_per, p + 10);
2181 put_unaligned_be16(sdebug_sector_size, p + 12);
2182 if (sdebug_removable)
2183 p[20] |= 0x20; /* should agree with INQUIRY */
2184 if (1 == pcontrol)
2185 memset(p + 2, 0, sizeof(format_pg) - 2);
2186 return sizeof(format_pg);
2187 }
2188
2189 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2190 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2191 0, 0, 0, 0};
2192
2193 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2194 { /* Caching page for mode_sense */
2195 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2196 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2197 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2198 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2199
2200 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2201 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2202 memcpy(p, caching_pg, sizeof(caching_pg));
2203 if (1 == pcontrol)
2204 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2205 else if (2 == pcontrol)
2206 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2207 return sizeof(caching_pg);
2208 }
2209
2210 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2211 0, 0, 0x2, 0x4b};
2212
2213 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2214 { /* Control mode page for mode_sense */
2215 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2216 0, 0, 0, 0};
2217 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2218 0, 0, 0x2, 0x4b};
2219
2220 if (sdebug_dsense)
2221 ctrl_m_pg[2] |= 0x4;
2222 else
2223 ctrl_m_pg[2] &= ~0x4;
2224
2225 if (sdebug_ato)
2226 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2227
2228 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2229 if (1 == pcontrol)
2230 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2231 else if (2 == pcontrol)
2232 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2233 return sizeof(ctrl_m_pg);
2234 }
2235
2236
2237 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2238 { /* Informational Exceptions control mode page for mode_sense */
2239 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2240 0, 0, 0x0, 0x0};
2241 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2242 0, 0, 0x0, 0x0};
2243
2244 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2245 if (1 == pcontrol)
2246 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2247 else if (2 == pcontrol)
2248 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2249 return sizeof(iec_m_pg);
2250 }
2251
2252 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2253 { /* SAS SSP mode page - short format for mode_sense */
2254 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2255 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2256
2257 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2258 if (1 == pcontrol)
2259 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2260 return sizeof(sas_sf_m_pg);
2261 }
2262
2263
2264 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2265 int target_dev_id)
2266 { /* SAS phy control and discover mode page for mode_sense */
2267 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2268 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0x2, 0, 0, 0, 0, 0, 0, 0,
2272 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273 0, 0, 0, 0, 0, 0, 0, 0,
2274 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2275 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0x3, 0, 0, 0, 0, 0, 0, 0,
2278 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2279 0, 0, 0, 0, 0, 0, 0, 0,
2280 };
2281 int port_a, port_b;
2282
2283 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2284 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2285 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2286 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2287 port_a = target_dev_id + 1;
2288 port_b = port_a + 1;
2289 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2290 put_unaligned_be32(port_a, p + 20);
2291 put_unaligned_be32(port_b, p + 48 + 20);
2292 if (1 == pcontrol)
2293 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2294 return sizeof(sas_pcd_m_pg);
2295 }
2296
2297 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2298 { /* SAS SSP shared protocol specific port mode subpage */
2299 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2300 0, 0, 0, 0, 0, 0, 0, 0,
2301 };
2302
2303 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2304 if (1 == pcontrol)
2305 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2306 return sizeof(sas_sha_m_pg);
2307 }
2308
2309 #define SDEBUG_MAX_MSENSE_SZ 256
2310
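/*
 * MODE SENSE (6 and 10 byte cdbs). Builds the header, an optional short
 * or long (LLBAA) block descriptor, then dispatches on the page code;
 * page 0x3f returns all supported pages. The response is truncated to
 * the allocation length.
 */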
2311 static int resp_mode_sense(struct scsi_cmnd *scp,
2312 struct sdebug_dev_info *devip)
2313 {
2314 int pcontrol, pcode, subpcode, bd_len;
2315 unsigned char dev_spec;
2316 u32 alloc_len, offset, len;
2317 int target_dev_id;
2318 int target = scp->device->id;
2319 unsigned char *ap;
2320 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2321 unsigned char *cmd = scp->cmnd;
2322 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2323
2324 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2325 pcontrol = (cmd[2] & 0xc0) >> 6;
2326 pcode = cmd[2] & 0x3f;
2327 subpcode = cmd[3];
2328 msense_6 = (MODE_SENSE == cmd[0]);
2329 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2330 is_disk = (sdebug_ptype == TYPE_DISK);
2331 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2332 if ((is_disk || is_zbc) && !dbd)
2333 bd_len = llbaa ? 16 : 8;
2334 else
2335 bd_len = 0;
2336 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2337 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2338 if (0x3 == pcontrol) { /* Saving values not supported */
2339 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2340 return check_condition_result;
2341 }
2342 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2343 (devip->target * 1000) - 3;
2344 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2345 if (is_disk || is_zbc) {
2346 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2347 if (sdebug_wp)
2348 dev_spec |= 0x80;
2349 } else
2350 dev_spec = 0x0;
2351 if (msense_6) {
2352 arr[2] = dev_spec;
2353 arr[3] = bd_len;
2354 offset = 4;
2355 } else {
2356 arr[3] = dev_spec;
2357 if (16 == bd_len)
2358 arr[4] = 0x1; /* set LONGLBA bit */
2359 arr[7] = bd_len; /* assume 255 or less */
2360 offset = 8;
2361 }
2362 ap = arr + offset;
2363 if ((bd_len > 0) && (!sdebug_capacity))
2364 sdebug_capacity = get_sdebug_capacity();
2365
2366 if (8 == bd_len) {
2367 if (sdebug_capacity > 0xfffffffe)
2368 put_unaligned_be32(0xffffffff, ap + 0);
2369 else
2370 put_unaligned_be32(sdebug_capacity, ap + 0);
2371 put_unaligned_be16(sdebug_sector_size, ap + 6);
2372 offset += bd_len;
2373 ap = arr + offset;
2374 } else if (16 == bd_len) {
2375 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2376 put_unaligned_be32(sdebug_sector_size, ap + 12);
2377 offset += bd_len;
2378 ap = arr + offset;
2379 }
2380
2381 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2382 /* TODO: Control Extension page */
2383 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2384 return check_condition_result;
2385 }
2386 bad_pcode = false;
2387
2388 switch (pcode) {
2389 case 0x1: /* Read-Write error recovery page, direct access */
2390 len = resp_err_recov_pg(ap, pcontrol, target);
2391 offset += len;
2392 break;
2393 case 0x2: /* Disconnect-Reconnect page, all devices */
2394 len = resp_disconnect_pg(ap, pcontrol, target);
2395 offset += len;
2396 break;
2397 case 0x3: /* Format device page, direct access */
2398 if (is_disk) {
2399 len = resp_format_pg(ap, pcontrol, target);
2400 offset += len;
2401 } else
2402 bad_pcode = true;
2403 break;
2404 case 0x8: /* Caching page, direct access */
2405 if (is_disk || is_zbc) {
2406 len = resp_caching_pg(ap, pcontrol, target);
2407 offset += len;
2408 } else
2409 bad_pcode = true;
2410 break;
2411 case 0xa: /* Control Mode page, all devices */
2412 len = resp_ctrl_m_pg(ap, pcontrol, target);
2413 offset += len;
2414 break;
2415 case 0x19: /* if spc==1 then sas phy, control+discover */
2416 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2417 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2418 return check_condition_result;
2419 }
2420 len = 0;
2421 if ((0x0 == subpcode) || (0xff == subpcode))
2422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 if ((0x1 == subpcode) || (0xff == subpcode))
2424 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2425 target_dev_id);
2426 if ((0x2 == subpcode) || (0xff == subpcode))
2427 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 offset += len;
2429 break;
2430 case 0x1c: /* Informational Exceptions Mode page, all devices */
2431 len = resp_iec_m_pg(ap, pcontrol, target);
2432 offset += len;
2433 break;
2434 case 0x3f: /* Read all Mode pages */
2435 if ((0 == subpcode) || (0xff == subpcode)) {
2436 len = resp_err_recov_pg(ap, pcontrol, target);
2437 len += resp_disconnect_pg(ap + len, pcontrol, target);
2438 if (is_disk) {
2439 len += resp_format_pg(ap + len, pcontrol,
2440 target);
2441 len += resp_caching_pg(ap + len, pcontrol,
2442 target);
2443 } else if (is_zbc) {
2444 len += resp_caching_pg(ap + len, pcontrol,
2445 target);
2446 }
2447 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2448 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 if (0xff == subpcode) {
2450 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2451 target, target_dev_id);
2452 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2453 }
2454 len += resp_iec_m_pg(ap + len, pcontrol, target);
2455 offset += len;
2456 } else {
2457 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2458 return check_condition_result;
2459 }
2460 break;
2461 default:
2462 bad_pcode = true;
2463 break;
2464 }
2465 if (bad_pcode) {
2466 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2467 return check_condition_result;
2468 }
2469 if (msense_6)
2470 arr[0] = offset - 1;
2471 else
2472 put_unaligned_be16((offset - 2), arr + 0);
2473 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2474 }
2475
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2477
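/*
 * MODE SELECT (6 and 10 byte cdbs). Only the caching, control and
 * informational exceptions mode pages may be changed; a successful
 * change raises the MODE PARAMETERS CHANGED unit attention.
 */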
2478 static int resp_mode_select(struct scsi_cmnd *scp,
2479 struct sdebug_dev_info *devip)
2480 {
2481 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2482 int param_len, res, mpage;
2483 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2484 unsigned char *cmd = scp->cmnd;
2485 int mselect6 = (MODE_SELECT == cmd[0]);
2486
2487 memset(arr, 0, sizeof(arr));
2488 pf = cmd[1] & 0x10;
2489 sp = cmd[1] & 0x1;
2490 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2491 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2492 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2493 return check_condition_result;
2494 }
2495 res = fetch_to_dev_buffer(scp, arr, param_len);
2496 if (-1 == res)
2497 return DID_ERROR << 16;
2498 else if (sdebug_verbose && (res < param_len))
2499 sdev_printk(KERN_INFO, scp->device,
2500 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2501 __func__, param_len, res);
2502 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2503 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2504 off = bd_len + (mselect6 ? 4 : 8);
2505 if (md_len > 2 || off >= res) {
2506 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2507 return check_condition_result;
2508 }
2509 mpage = arr[off] & 0x3f;
2510 ps = !!(arr[off] & 0x80);
2511 if (ps) {
2512 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2513 return check_condition_result;
2514 }
2515 spf = !!(arr[off] & 0x40);
2516 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2517 (arr[off + 1] + 2);
2518 if ((pg_len + off) > param_len) {
2519 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2520 PARAMETER_LIST_LENGTH_ERR, 0);
2521 return check_condition_result;
2522 }
2523 switch (mpage) {
2524 case 0x8: /* Caching Mode page */
2525 if (caching_pg[1] == arr[off + 1]) {
2526 memcpy(caching_pg + 2, arr + off + 2,
2527 sizeof(caching_pg) - 2);
2528 goto set_mode_changed_ua;
2529 }
2530 break;
2531 case 0xa: /* Control Mode page */
2532 if (ctrl_m_pg[1] == arr[off + 1]) {
2533 memcpy(ctrl_m_pg + 2, arr + off + 2,
2534 sizeof(ctrl_m_pg) - 2);
2535 sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2539 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2540 goto set_mode_changed_ua;
2541 }
2542 break;
2543 case 0x1c: /* Informational Exceptions Mode page */
2544 if (iec_m_pg[1] == arr[off + 1]) {
2545 memcpy(iec_m_pg + 2, arr + off + 2,
2546 sizeof(iec_m_pg) - 2);
2547 goto set_mode_changed_ua;
2548 }
2549 break;
2550 default:
2551 break;
2552 }
2553 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2554 return check_condition_result;
2555 set_mode_changed_ua:
2556 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2557 return 0;
2558 }
2559
2560 static int resp_temp_l_pg(unsigned char *arr)
2561 {
2562 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2563 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2564 };
2565
2566 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2567 return sizeof(temp_l_pg);
2568 }
2569
2570 static int resp_ie_l_pg(unsigned char *arr)
2571 {
2572 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573 };
2574
2575 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2577 arr[4] = THRESHOLD_EXCEEDED;
2578 arr[5] = 0xff;
2579 }
2580 return sizeof(ie_l_pg);
2581 }
2582
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2584
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586 struct sdebug_dev_info *devip)
2587 {
2588 int ppc, sp, pcode, subpcode;
2589 u32 alloc_len, len, n;
2590 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2591 unsigned char *cmd = scp->cmnd;
2592
2593 memset(arr, 0, sizeof(arr));
2594 ppc = cmd[1] & 0x2;
2595 sp = cmd[1] & 0x1;
2596 if (ppc || sp) {
2597 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2598 return check_condition_result;
2599 }
2600 pcode = cmd[2] & 0x3f;
2601 subpcode = cmd[3] & 0xff;
2602 alloc_len = get_unaligned_be16(cmd + 7);
2603 arr[0] = pcode;
2604 if (0 == subpcode) {
2605 switch (pcode) {
2606 case 0x0: /* Supported log pages log page */
2607 n = 4;
2608 arr[n++] = 0x0; /* this page */
2609 arr[n++] = 0xd; /* Temperature */
2610 arr[n++] = 0x2f; /* Informational exceptions */
2611 arr[3] = n - 4;
2612 break;
2613 case 0xd: /* Temperature log page */
2614 arr[3] = resp_temp_l_pg(arr + 4);
2615 break;
2616 case 0x2f: /* Informational exceptions log page */
2617 arr[3] = resp_ie_l_pg(arr + 4);
2618 break;
2619 default:
2620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2621 return check_condition_result;
2622 }
2623 } else if (0xff == subpcode) {
2624 arr[0] |= 0x40;
2625 arr[1] = subpcode;
2626 switch (pcode) {
2627 case 0x0: /* Supported log pages and subpages log page */
2628 n = 4;
2629 arr[n++] = 0x0;
2630 arr[n++] = 0x0; /* 0,0 page */
2631 arr[n++] = 0x0;
2632 arr[n++] = 0xff; /* this page */
2633 arr[n++] = 0xd;
2634 arr[n++] = 0x0; /* Temperature */
2635 arr[n++] = 0x2f;
2636 arr[n++] = 0x0; /* Informational exceptions */
2637 arr[3] = n - 4;
2638 break;
2639 case 0xd: /* Temperature subpages */
2640 n = 4;
2641 arr[n++] = 0xd;
2642 arr[n++] = 0x0; /* Temperature */
2643 arr[3] = n - 4;
2644 break;
2645 case 0x2f: /* Informational exceptions subpages */
2646 n = 4;
2647 arr[n++] = 0x2f;
2648 arr[n++] = 0x0; /* Informational exceptions */
2649 arr[3] = n - 4;
2650 break;
2651 default:
2652 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2653 return check_condition_result;
2654 }
2655 } else {
2656 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2657 return check_condition_result;
2658 }
2659 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2660 return fill_from_dev_buffer(scp, arr,
2661 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2662 }
2663
2664 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2665 {
2666 return devip->nr_zones != 0;
2667 }
2668
2669 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2670 unsigned long long lba)
2671 {
2672 return &devip->zstate[lba >> devip->zsize_shift];
2673 }
2674
2675 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2676 {
2677 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2678 }
2679
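/*
 * Close a zone: fix up the implicit/explicit open zone counts, then
 * mark the zone empty if its write pointer is still at the zone start,
 * closed otherwise. No-op for conventional zones and zones that are
 * not open.
 */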
2680 static void zbc_close_zone(struct sdebug_dev_info *devip,
2681 struct sdeb_zone_state *zsp)
2682 {
2683 enum sdebug_z_cond zc;
2684
2685 if (zbc_zone_is_conv(zsp))
2686 return;
2687
2688 zc = zsp->z_cond;
2689 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2690 return;
2691
2692 if (zc == ZC2_IMPLICIT_OPEN)
2693 devip->nr_imp_open--;
2694 else
2695 devip->nr_exp_open--;
2696
2697 if (zsp->z_wp == zsp->z_start) {
2698 zsp->z_cond = ZC1_EMPTY;
2699 } else {
2700 zsp->z_cond = ZC4_CLOSED;
2701 devip->nr_closed++;
2702 }
2703 }
2704
2705 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2706 {
2707 struct sdeb_zone_state *zsp = &devip->zstate[0];
2708 unsigned int i;
2709
2710 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2711 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2712 zbc_close_zone(devip, zsp);
2713 return;
2714 }
2715 }
2716 }
2717
2718 static void zbc_open_zone(struct sdebug_dev_info *devip,
2719 struct sdeb_zone_state *zsp, bool explicit)
2720 {
2721 enum sdebug_z_cond zc;
2722
2723 if (zbc_zone_is_conv(zsp))
2724 return;
2725
2726 zc = zsp->z_cond;
2727 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2728 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2729 return;
2730
2731 /* Close an implicit open zone if necessary */
2732 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2733 zbc_close_zone(devip, zsp);
2734 else if (devip->max_open &&
2735 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2736 zbc_close_imp_open_zone(devip);
2737
2738 if (zsp->z_cond == ZC4_CLOSED)
2739 devip->nr_closed--;
2740 if (explicit) {
2741 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2742 devip->nr_exp_open++;
2743 } else {
2744 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2745 devip->nr_imp_open++;
2746 }
2747 }
2748
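/*
 * Advance the write pointer after writing num blocks at lba. For a
 * sequential write required zone the write has already been checked to
 * fit, so the wp simply advances (the zone becomes full at the end).
 * Sequential write preferred zones may be written across zone
 * boundaries, so walk each affected zone, flag non-sequential writes
 * and cap each wp at its zone end.
 */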
2749 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2750 unsigned long long lba, unsigned int num)
2751 {
2752 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2753 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2754
2755 if (zbc_zone_is_conv(zsp))
2756 return;
2757
2758 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2759 zsp->z_wp += num;
2760 if (zsp->z_wp >= zend)
2761 zsp->z_cond = ZC5_FULL;
2762 return;
2763 }
2764
2765 while (num) {
2766 if (lba != zsp->z_wp)
2767 zsp->z_non_seq_resource = true;
2768
2769 end = lba + num;
2770 if (end >= zend) {
2771 n = zend - lba;
2772 zsp->z_wp = zend;
2773 } else if (end > zsp->z_wp) {
2774 n = num;
2775 zsp->z_wp = end;
2776 } else {
2777 n = num;
2778 }
2779 if (zsp->z_wp >= zend)
2780 zsp->z_cond = ZC5_FULL;
2781
2782 num -= n;
2783 lba += n;
2784 if (num) {
2785 zsp++;
2786 zend = zsp->z_start + zsp->z_size;
2787 }
2788 }
2789 }
2790
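/*
 * ZBC access checks: host-managed reads may not span a conventional to
 * sequential zone boundary; writes to sequential write required zones
 * must start at the write pointer, stay within one zone and not target
 * a full zone. Writing an empty or closed zone implicitly opens it,
 * subject to the max_open limit.
 */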
2791 static int check_zbc_access_params(struct scsi_cmnd *scp,
2792 unsigned long long lba, unsigned int num, bool write)
2793 {
2794 struct scsi_device *sdp = scp->device;
2795 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2796 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2797 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2798
2799 if (!write) {
2800 if (devip->zmodel == BLK_ZONED_HA)
2801 return 0;
2802 /* For host-managed, reads cannot cross zone type boundaries */
2803 if (zsp_end != zsp &&
2804 zbc_zone_is_conv(zsp) &&
2805 !zbc_zone_is_conv(zsp_end)) {
2806 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2807 LBA_OUT_OF_RANGE,
2808 READ_INVDATA_ASCQ);
2809 return check_condition_result;
2810 }
2811 return 0;
2812 }
2813
2814 /* No restrictions for writes within conventional zones */
2815 if (zbc_zone_is_conv(zsp)) {
2816 if (!zbc_zone_is_conv(zsp_end)) {
2817 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2818 LBA_OUT_OF_RANGE,
2819 WRITE_BOUNDARY_ASCQ);
2820 return check_condition_result;
2821 }
2822 return 0;
2823 }
2824
2825 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2826 /* Writes cannot cross sequential zone boundaries */
2827 if (zsp_end != zsp) {
2828 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2829 LBA_OUT_OF_RANGE,
2830 WRITE_BOUNDARY_ASCQ);
2831 return check_condition_result;
2832 }
2833 /* Cannot write full zones */
2834 if (zsp->z_cond == ZC5_FULL) {
2835 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2836 INVALID_FIELD_IN_CDB, 0);
2837 return check_condition_result;
2838 }
2839 /* Writes must be aligned to the zone WP */
2840 if (lba != zsp->z_wp) {
2841 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2842 LBA_OUT_OF_RANGE,
2843 UNALIGNED_WRITE_ASCQ);
2844 return check_condition_result;
2845 }
2846 }
2847
2848 /* Handle implicit open of closed and empty zones */
2849 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2850 if (devip->max_open &&
2851 devip->nr_exp_open >= devip->max_open) {
2852 mk_sense_buffer(scp, DATA_PROTECT,
2853 INSUFF_RES_ASC,
2854 INSUFF_ZONE_ASCQ);
2855 return check_condition_result;
2856 }
2857 zbc_open_zone(devip, zsp, false);
2858 }
2859
2860 return 0;
2861 }
2862
2863 static inline int check_device_access_params(struct scsi_cmnd *scp,
2864 unsigned long long lba,
2865 unsigned int num, bool write)
2866 {
2867 struct scsi_device *sdp = scp->device;
2868 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2869
2870 if (lba + num > sdebug_capacity) {
2871 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2872 return check_condition_result;
2873 }
2874 /* transfer length excessive (tie in to block limits VPD page) */
2875 if (num > sdebug_store_sectors) {
2876 /* needs work to find which cdb byte 'num' comes from */
2877 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2878 return check_condition_result;
2879 }
2880 if (write && unlikely(sdebug_wp)) {
2881 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2882 return check_condition_result;
2883 }
2884 if (sdebug_dev_is_zoned(devip))
2885 return check_zbc_access_params(scp, lba, num, write);
2886
2887 return 0;
2888 }
2889
2890 /*
2891 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2892 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2893 * that access any of the "stores" in struct sdeb_store_info should call this
2894 * function with bug_if_fake_rw set to true.
2895 */
2896 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2897 bool bug_if_fake_rw)
2898 {
2899 if (sdebug_fake_rw) {
2900 BUG_ON(bug_if_fake_rw); /* See note above */
2901 return NULL;
2902 }
2903 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2904 }
2905
2906 /* Returns number of bytes copied or -1 if error. */
2907 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2908 u32 sg_skip, u64 lba, u32 num, bool do_write)
2909 {
2910 int ret;
2911 u64 block, rest = 0;
2912 enum dma_data_direction dir;
2913 struct scsi_data_buffer *sdb = &scp->sdb;
2914 u8 *fsp;
2915
2916 if (do_write) {
2917 dir = DMA_TO_DEVICE;
2918 write_since_sync = true;
2919 } else {
2920 dir = DMA_FROM_DEVICE;
2921 }
2922
2923 if (!sdb->length || !sip)
2924 return 0;
2925 if (scp->sc_data_direction != dir)
2926 return -1;
2927 fsp = sip->storep;
2928
2929 block = do_div(lba, sdebug_store_sectors);
2930 if (block + num > sdebug_store_sectors)
2931 rest = block + num - sdebug_store_sectors;
2932
2933 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2934 fsp + (block * sdebug_sector_size),
2935 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2936 if (ret != (num - rest) * sdebug_sector_size)
2937 return ret;
2938
2939 if (rest) {
2940 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2941 fsp, rest * sdebug_sector_size,
2942 sg_skip + ((num - rest) * sdebug_sector_size),
2943 do_write);
2944 }
2945
2946 return ret;
2947 }
2948
2949 /* Returns number of bytes copied or -1 if error. */
2950 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2951 {
2952 struct scsi_data_buffer *sdb = &scp->sdb;
2953
2954 if (!sdb->length)
2955 return 0;
2956 if (scp->sc_data_direction != DMA_TO_DEVICE)
2957 return -1;
2958 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2959 num * sdebug_sector_size, 0, true);
2960 }
2961
2962 /* If the store at lba compares equal to the first num sectors of arr,
2963 * copy the second num sectors of arr into the store at lba and return
2964 * true. If the comparison fails then return false. */
2965 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2966 const u8 *arr, bool compare_only)
2967 {
2968 bool res;
2969 u64 block, rest = 0;
2970 u32 store_blks = sdebug_store_sectors;
2971 u32 lb_size = sdebug_sector_size;
2972 u8 *fsp = sip->storep;
2973
2974 block = do_div(lba, store_blks);
2975 if (block + num > store_blks)
2976 rest = block + num - store_blks;
2977
2978 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2979 if (!res)
2980 return res;
2981 if (rest)
2982 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2983 rest * lb_size);
2984 if (!res)
2985 return res;
2986 if (compare_only)
2987 return true;
2988 arr += num * lb_size;
2989 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2990 if (rest)
2991 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2992 return res;
2993 }
2994
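/*
 * Guard tag for protection information: an IP checksum or the T10 CRC,
 * selected by the guard module parameter (1 or 0 respectively).
 */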
2995 static __be16 dif_compute_csum(const void *buf, int len)
2996 {
2997 __be16 csum;
2998
2999 if (sdebug_guard)
3000 csum = (__force __be16)ip_compute_csum(buf, len);
3001 else
3002 csum = cpu_to_be16(crc_t10dif(buf, len));
3003
3004 return csum;
3005 }
3006
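/*
 * Verify one sector's PI tuple against the data. Returns 0 on success,
 * 0x01 for a guard tag mismatch or 0x03 for a reference tag mismatch
 * (type 1 checks the low 32 bits of the sector number, type 2 the
 * expected initial LBA); callers use the value as the sense ASCQ.
 */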
3007 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3008 sector_t sector, u32 ei_lba)
3009 {
3010 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3011
3012 if (sdt->guard_tag != csum) {
3013 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3014 (unsigned long)sector,
3015 be16_to_cpu(sdt->guard_tag),
3016 be16_to_cpu(csum));
3017 return 0x01;
3018 }
3019 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3020 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3021 pr_err("REF check failed on sector %lu\n",
3022 (unsigned long)sector);
3023 return 0x03;
3024 }
3025 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3026 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3027 pr_err("REF check failed on sector %lu\n",
3028 (unsigned long)sector);
3029 return 0x03;
3030 }
3031 return 0;
3032 }
3033
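/*
 * Copy PI tuples between the command's protection scatter-gather list
 * and the dif_storep shadow store, in the direction given by "read",
 * wrapping at the end of the store just as the data store does.
 */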
3034 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3035 unsigned int sectors, bool read)
3036 {
3037 size_t resid;
3038 void *paddr;
3039 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3040 scp->device->hostdata, true);
3041 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3042 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3043 struct sg_mapping_iter miter;
3044
3045 /* Bytes of protection data to copy into sgl */
3046 resid = sectors * sizeof(*dif_storep);
3047
3048 sg_miter_start(&miter, scsi_prot_sglist(scp),
3049 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3050 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3051
3052 while (sg_miter_next(&miter) && resid > 0) {
3053 size_t len = min_t(size_t, miter.length, resid);
3054 void *start = dif_store(sip, sector);
3055 size_t rest = 0;
3056
3057 if (dif_store_end < start + len)
3058 rest = start + len - dif_store_end;
3059
3060 paddr = miter.addr;
3061
3062 if (read)
3063 memcpy(paddr, start, len - rest);
3064 else
3065 memcpy(start, paddr, len - rest);
3066
3067 if (rest) {
3068 if (read)
3069 memcpy(paddr + len - rest, dif_storep, rest);
3070 else
3071 memcpy(dif_storep, paddr + len - rest, rest);
3072 }
3073
3074 sector += len / sizeof(*dif_storep);
3075 resid -= len;
3076 }
3077 sg_miter_stop(&miter);
3078 }
3079
3080 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3081 unsigned int sectors, u32 ei_lba)
3082 {
3083 unsigned int i;
3084 sector_t sector;
3085 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3086 scp->device->hostdata, true);
3087 struct t10_pi_tuple *sdt;
3088
3089 for (i = 0; i < sectors; i++, ei_lba++) {
3090 int ret;
3091
3092 sector = start_sec + i;
3093 sdt = dif_store(sip, sector);
3094
3095 if (sdt->app_tag == cpu_to_be16(0xffff))
3096 continue;
3097
3098 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3099 ei_lba);
3100 if (ret) {
3101 dif_errors++;
3102 return ret;
3103 }
3104 }
3105
3106 dif_copy_prot(scp, start_sec, sectors, true);
3107 dix_reads++;
3108
3109 return 0;
3110 }
3111
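/*
 * Service READ(6/10/12/16/32) and XDWRITEREAD(10). Decodes lba and
 * transfer length per cdb variant, applies any configured error
 * injection, then copies from the store under the per-store read lock.
 */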
3112 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3113 {
3114 bool check_prot;
3115 u32 num;
3116 u32 ei_lba;
3117 int ret;
3118 u64 lba;
3119 struct sdeb_store_info *sip = devip2sip(devip, true);
3120 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3121 u8 *cmd = scp->cmnd;
3122
3123 switch (cmd[0]) {
3124 case READ_16:
3125 ei_lba = 0;
3126 lba = get_unaligned_be64(cmd + 2);
3127 num = get_unaligned_be32(cmd + 10);
3128 check_prot = true;
3129 break;
3130 case READ_10:
3131 ei_lba = 0;
3132 lba = get_unaligned_be32(cmd + 2);
3133 num = get_unaligned_be16(cmd + 7);
3134 check_prot = true;
3135 break;
3136 case READ_6:
3137 ei_lba = 0;
3138 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3139 (u32)(cmd[1] & 0x1f) << 16;
3140 num = (0 == cmd[4]) ? 256 : cmd[4];
3141 check_prot = true;
3142 break;
3143 case READ_12:
3144 ei_lba = 0;
3145 lba = get_unaligned_be32(cmd + 2);
3146 num = get_unaligned_be32(cmd + 6);
3147 check_prot = true;
3148 break;
3149 case XDWRITEREAD_10:
3150 ei_lba = 0;
3151 lba = get_unaligned_be32(cmd + 2);
3152 num = get_unaligned_be16(cmd + 7);
3153 check_prot = false;
3154 break;
3155 default: /* assume READ(32) */
3156 lba = get_unaligned_be64(cmd + 12);
3157 ei_lba = get_unaligned_be32(cmd + 20);
3158 num = get_unaligned_be32(cmd + 28);
3159 check_prot = false;
3160 break;
3161 }
3162 if (unlikely(have_dif_prot && check_prot)) {
3163 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3164 (cmd[1] & 0xe0)) {
3165 mk_sense_invalid_opcode(scp);
3166 return check_condition_result;
3167 }
3168 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3169 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3170 (cmd[1] & 0xe0) == 0)
3171 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3172 "to DIF device\n");
3173 }
3174 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3175 atomic_read(&sdeb_inject_pending))) {
3176 num /= 2;
3177 atomic_set(&sdeb_inject_pending, 0);
3178 }
3179
3180 ret = check_device_access_params(scp, lba, num, false);
3181 if (ret)
3182 return ret;
3183 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3184 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3185 ((lba + num) > sdebug_medium_error_start))) {
3186 /* claim unrecoverable read error */
3187 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3188 /* set info field and valid bit for fixed descriptor */
3189 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3190 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3191 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3192 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3193 put_unaligned_be32(ret, scp->sense_buffer + 3);
3194 }
3195 scsi_set_resid(scp, scsi_bufflen(scp));
3196 return check_condition_result;
3197 }
3198
3199 read_lock(macc_lckp);
3200
3201 /* DIX + T10 DIF */
3202 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3203 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3204
3205 if (prot_ret) {
3206 read_unlock(macc_lckp);
3207 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3208 return illegal_condition_result;
3209 }
3210 }
3211
3212 ret = do_device_access(sip, scp, 0, lba, num, false);
3213 read_unlock(macc_lckp);
3214 if (unlikely(ret == -1))
3215 return DID_ERROR << 16;
3216
3217 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3218
3219 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3220 atomic_read(&sdeb_inject_pending))) {
3221 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3222 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3223 atomic_set(&sdeb_inject_pending, 0);
3224 return check_condition_result;
3225 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3226 /* Logical block guard check failed */
3227 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3228 atomic_set(&sdeb_inject_pending, 0);
3229 return illegal_condition_result;
3230 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3231 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3232 atomic_set(&sdeb_inject_pending, 0);
3233 return illegal_condition_result;
3234 }
3235 }
3236 return 0;
3237 }
3238
dump_sector(unsigned char * buf,int len)3239 static void dump_sector(unsigned char *buf, int len)
3240 {
3241 int i, j, n;
3242
3243 pr_err(">>> Sector Dump <<<\n");
3244 for (i = 0 ; i < len ; i += 16) {
3245 char b[128];
3246
3247 for (j = 0, n = 0; j < 16; j++) {
3248 unsigned char c = buf[i+j];
3249
3250 if (c >= 0x20 && c < 0x7e)
3251 n += scnprintf(b + n, sizeof(b) - n,
3252 " %c ", buf[i+j]);
3253 else
3254 n += scnprintf(b + n, sizeof(b) - n,
3255 "%02x ", buf[i+j]);
3256 }
3257 pr_err("%04d: %s\n", i, b);
3258 }
3259 }
3260
prot_verify_write(struct scsi_cmnd * SCpnt,sector_t start_sec,unsigned int sectors,u32 ei_lba)3261 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3262 unsigned int sectors, u32 ei_lba)
3263 {
3264 int ret;
3265 struct t10_pi_tuple *sdt;
3266 void *daddr;
3267 sector_t sector = start_sec;
3268 int ppage_offset;
3269 int dpage_offset;
3270 struct sg_mapping_iter diter;
3271 struct sg_mapping_iter piter;
3272
3273 BUG_ON(scsi_sg_count(SCpnt) == 0);
3274 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3275
3276 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3277 scsi_prot_sg_count(SCpnt),
3278 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3279 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3280 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3281
3282 /* For each protection page */
3283 while (sg_miter_next(&piter)) {
3284 dpage_offset = 0;
3285 if (WARN_ON(!sg_miter_next(&diter))) {
3286 ret = 0x01;
3287 goto out;
3288 }
3289
3290 for (ppage_offset = 0; ppage_offset < piter.length;
3291 ppage_offset += sizeof(struct t10_pi_tuple)) {
3292 /* If we're at the end of the current
3293 * data page advance to the next one
3294 */
3295 if (dpage_offset >= diter.length) {
3296 if (WARN_ON(!sg_miter_next(&diter))) {
3297 ret = 0x01;
3298 goto out;
3299 }
3300 dpage_offset = 0;
3301 }
3302
3303 sdt = piter.addr + ppage_offset;
3304 daddr = diter.addr + dpage_offset;
3305
3306 ret = dif_verify(sdt, daddr, sector, ei_lba);
3307 if (ret) {
3308 dump_sector(daddr, sdebug_sector_size);
3309 goto out;
3310 }
3311
3312 sector++;
3313 ei_lba++;
3314 dpage_offset += sdebug_sector_size;
3315 }
3316 diter.consumed = dpage_offset;
3317 sg_miter_stop(&diter);
3318 }
3319 sg_miter_stop(&piter);
3320
3321 dif_copy_prot(SCpnt, start_sec, sectors, false);
3322 dix_writes++;
3323
3324 return 0;
3325
3326 out:
3327 dif_errors++;
3328 sg_miter_stop(&diter);
3329 sg_miter_stop(&piter);
3330 return ret;
3331 }
3332
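/*
 * Translate between LBAs and bit indexes in the provisioning map. Each
 * bit covers sdebug_unmap_granularity blocks and a non-zero alignment
 * shifts the grid. Illustrative example (values chosen for the example
 * only): with granularity 4 and alignment 1, LBA 0 maps to index 0,
 * LBAs 1..4 map to index 1, and map_index_to_lba(1) returns 1.
 */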
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}

static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, sip->map_storep);

		lba = map_index_to_lba(index + 1);
	}
}

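/*
 * Clear map bits for granules wholly contained in [lba, lba + len) and
 * scrub their payload. Granules that only partially overlap the range
 * are left mapped, which is how thin provisioning typically rounds
 * down unmap requests.
 */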
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff bytes */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}

static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}

/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
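/*
 * Data-out buffer layout, as parsed below: the first lbdof logical
 * blocks hold a 32 byte parameter list header followed by num_lrd
 * 32 byte LBA range descriptors (be64 LBA at offset 0, be32 number of
 * logical blocks at offset 8 and, for the 32 byte cdb variant, the
 * be32 expected initial reference tag at offset 12). The write data
 * for the ranges follows, packed back to back.
 */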
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			    wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LB Data Offset field bad\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LBA range descriptors don't fit\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			    my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;
	/* Spec: the Buffer Transfer Length field counts logical blocks of data-out */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
				    my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
					    "%s: %s: sum of blocks > data provided\n",
					    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
				    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
				    my_name, num_by, ret);

		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}

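/*
 * Common WRITE SAME back end. With the UNMAP bit set (and LBP active)
 * the range is simply deallocated. Otherwise one logical block is
 * zeroed (NDOB) or fetched from the data-out buffer, then replicated
 * across the remaining blocks of the range.
 */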
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fs1p;
	u8 *fsp;

	write_lock(macc_lckp);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock(macc_lckp);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	write_unlock(macc_lckp);

	return 0;
}

static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}

/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; that is left for
 * the future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}

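/*
 * COMPARE AND WRITE: the data-out buffer carries 2 * NUMBER OF LOGICAL
 * BLOCKS worth of data; the first half is compared with the store and,
 * only if every block matches, the second half is written in its place.
 * A mismatch yields a MISCOMPARE sense key instead.
 */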
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];	/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}

struct unmap_block_desc {
	__be64 lba;
	__be32 blocks;
	__be32 __reserved;
};

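/*
 * UNMAP parameter data: an 8 byte header (be16 data length, be16 block
 * descriptor data length, 4 reserved bytes) followed by the 16 byte
 * descriptors defined above, each naming one LBA range to deallocate.
 */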
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}

#define SDEBUG_GET_LBA_STATUS_LEN 32

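/*
 * GET LBA STATUS returns one LBA status descriptor: an 8 byte header
 * carrying the be32 parameter data length, then the be64 starting LBA,
 * the be32 extent length and a provisioning status byte (0 mapped,
 * 1 deallocated).
 */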
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}

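/*
 * SYNCHRONIZE CACHE(10/16) is a no-op for this driver's RAM store, so
 * only the range check matters. With the IMMED bit set, or when nothing
 * has been written since the last sync, completion is not delayed.
 */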
static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || (cmd[1] & 0x2))
		res = SDEG_RES_IMMED_MASK;
	else		/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}

/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will
 * return CONDITION MET if the specified blocks will (or already do) fit
 * in the cache, and a GOOD status otherwise. Model a disk with a big
 * cache and yield CONDITION MET. Actually tries to bring the range in
 * main memory into the cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}

#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *	"cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, use "echo '- 2 49409' > scan" .
 */
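/*
 * The response below is assembled RL_BUCKET_ELEMS LUNs (64 bytes) at a
 * time and copied out incrementally with p_fill_from_dev_buffer(), so
 * the on-stack array stays small no matter how many LUNs are reported.
 * The first bucket loses one slot to the 8 byte response header.
 */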
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}

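/*
 * VERIFY(10/16): BYTCHK=0 claims the medium check passed without
 * touching the store, BYTCHK=1 compares the data-out buffer against
 * the store block for block, and BYTCHK=3 sends a single block that is
 * compared against every block in the range. BYTCHK=2 is rejected.
 */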
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}

#define RZONES_DESC_HD 64

/* Report zones depending on start LBA and reporting options */
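/*
 * The response is a 64 byte header (be32 zone list length, be64 maximum
 * LBA) followed by one 64 byte descriptor per reported zone: zone type,
 * zone condition, and the be64 zone size, start LBA and write pointer.
 */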
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	unsigned int i, max_zones, rep_max_zones, nrz = 0;
	int ret = 0;
	u32 alloc_len, rep_opts, rep_len;
	bool partial;
	u64 lba, zs_lba;
	u8 *arr = NULL, *desc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	zs_lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);
	if (alloc_len == 0)
		return 0;	/* not an error */
	rep_opts = cmd[14] & 0x3f;
	partial = cmd[14] & 0x80;

	if (zs_lba >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
			    max_zones);

	arr = kzalloc(alloc_len, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	read_lock(macc_lckp);

	desc = arr + 64;
	for (i = 0; i < max_zones; i++) {
		lba = zs_lba + devip->zsize * i;
		if (lba > sdebug_capacity)
			break;
		zsp = zbc_zone(devip, lba);
		switch (rep_opts) {
		case 0x00:
			/* All zones */
			break;
		case 0x01:
			/* Empty zones */
			if (zsp->z_cond != ZC1_EMPTY)
				continue;
			break;
		case 0x02:
			/* Implicit open zones */
			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
				continue;
			break;
		case 0x03:
			/* Explicit open zones */
			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
				continue;
			break;
		case 0x04:
			/* Closed zones */
			if (zsp->z_cond != ZC4_CLOSED)
				continue;
			break;
		case 0x05:
			/* Full zones */
			if (zsp->z_cond != ZC5_FULL)
				continue;
			break;
		case 0x06:
		case 0x07:
		case 0x10:
			/*
			 * Read-only, offline and reset WP recommended are
			 * not emulated: no zones to report
			 */
			continue;
		case 0x11:
			/* non-seq-resource set */
			if (!zsp->z_non_seq_resource)
				continue;
			break;
		case 0x3f:
			/* Not write pointer (conventional) zones */
			if (!zbc_zone_is_conv(zsp))
				continue;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			ret = check_condition_result;
			goto fini;
		}

		if (nrz < rep_max_zones) {
			/* Fill zone descriptor */
			desc[0] = zsp->z_type;
			desc[1] = zsp->z_cond << 4;
			if (zsp->z_non_seq_resource)
				desc[1] |= 1 << 1;
			put_unaligned_be64((u64)zsp->z_size, desc + 8);
			put_unaligned_be64((u64)zsp->z_start, desc + 16);
			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
			desc += 64;
		}

		if (partial && nrz >= rep_max_zones)
			break;

		nrz++;
	}

	/* Report header */
	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
	put_unaligned_be64(sdebug_capacity - 1, arr + 8);

	rep_len = (unsigned long)desc - (unsigned long)arr;
	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));

fini:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}

/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC4_CLOSED)
			zbc_open_zone(devip, &devip->zstate[i], true);
	}
}

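/*
 * OPEN ZONE: with the ALL bit set every closed zone is explicitly
 * opened, provided the resulting number of open zones stays within
 * max_open. Otherwise the single zone whose start LBA is given in the
 * cdb is opened; a cdb LBA pointing elsewhere in a zone is rejected.
 */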
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be opened */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}

static void zbc_close_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_close_zone(devip, &devip->zstate[i]);
}

static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}

static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}

static void zbc_finish_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_finish_zone(devip, &devip->zstate[i], false);
}

static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}

static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}

static void zbc_rwp_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_rwp_zone(devip, &devip->zstate[i]);
}

static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}

static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
	u16 hwq;
	u32 tag = blk_mq_unique_tag(cmnd->request);

	hwq = blk_mq_unique_tag_to_hwq(tag);

	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
	if (WARN_ON_ONCE(hwq >= submit_queues))
		hwq = 0;

	return sdebug_q_arr + hwq;
}

static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(cmnd->request);
}

/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}

/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}

/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}

static bool got_shared_uuid;
static uuid_t shared_uuid;

static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size, checking that at least 2 zones can be
	 * created for the device.
	 */
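	/*
	 * Illustrative example (numbers chosen for the example, not the
	 * defaults): with a capacity of 262144 sectors and an initial
	 * zsize of 262144 blocks, the loop below halves zsize twice, to
	 * 65536 blocks, so that at least 4 zones fit on the device.
	 */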
4841 if (!sdeb_zbc_zone_size_mb) {
4842 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4843 >> ilog2(sdebug_sector_size);
4844 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4845 devip->zsize >>= 1;
4846 if (devip->zsize < 2) {
4847 pr_err("Device capacity too small\n");
4848 return -EINVAL;
4849 }
4850 } else {
4851 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4852 pr_err("Zone size is not a power of 2\n");
4853 return -EINVAL;
4854 }
4855 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4856 >> ilog2(sdebug_sector_size);
4857 if (devip->zsize >= capacity) {
4858 pr_err("Zone size too large for device capacity\n");
4859 return -EINVAL;
4860 }
4861 }
4862
4863 devip->zsize_shift = ilog2(devip->zsize);
4864 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4865
4866 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4867 pr_err("Number of conventional zones too large\n");
4868 return -EINVAL;
4869 }
4870 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4871
4872 if (devip->zmodel == BLK_ZONED_HM) {
4873 /* zbc_max_open_zones can be 0, meaning "not reported" */
4874 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4875 devip->max_open = (devip->nr_zones - 1) / 2;
4876 else
4877 devip->max_open = sdeb_zbc_max_open;
4878 }
4879
4880 devip->zstate = kcalloc(devip->nr_zones,
4881 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4882 if (!devip->zstate)
4883 return -ENOMEM;
4884
4885 for (i = 0; i < devip->nr_zones; i++) {
4886 zsp = &devip->zstate[i];
4887
4888 zsp->z_start = zstart;
4889
4890 if (i < devip->nr_conv_zones) {
4891 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4892 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4893 zsp->z_wp = (sector_t)-1;
4894 } else {
4895 if (devip->zmodel == BLK_ZONED_HM)
4896 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4897 else
4898 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4899 zsp->z_cond = ZC1_EMPTY;
4900 zsp->z_wp = zsp->z_start;
4901 }
4902
4903 if (zsp->z_start + devip->zsize < capacity)
4904 zsp->z_size = devip->zsize;
4905 else
4906 zsp->z_size = capacity - zsp->z_start;
4907
4908 zstart += zsp->z_size;
4909 }
4910
4911 return 0;
4912 }
4913
sdebug_device_create(struct sdebug_host_info * sdbg_host,gfp_t flags)4914 static struct sdebug_dev_info *sdebug_device_create(
4915 struct sdebug_host_info *sdbg_host, gfp_t flags)
4916 {
4917 struct sdebug_dev_info *devip;
4918
4919 devip = kzalloc(sizeof(*devip), flags);
4920 if (devip) {
4921 if (sdebug_uuid_ctl == 1)
4922 uuid_gen(&devip->lu_name);
4923 else if (sdebug_uuid_ctl == 2) {
4924 if (got_shared_uuid)
4925 devip->lu_name = shared_uuid;
4926 else {
4927 uuid_gen(&shared_uuid);
4928 got_shared_uuid = true;
4929 devip->lu_name = shared_uuid;
4930 }
4931 }
4932 devip->sdbg_host = sdbg_host;
4933 if (sdeb_zbc_in_use) {
4934 devip->zmodel = sdeb_zbc_model;
4935 if (sdebug_device_create_zones(devip)) {
4936 kfree(devip);
4937 return NULL;
4938 }
4939 } else {
4940 devip->zmodel = BLK_ZONED_NONE;
4941 }
4942 devip->sdbg_host = sdbg_host;
4943 devip->create_ts = ktime_get_boottime();
4944 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4945 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4946 }
4947 return devip;
4948 }
4949
find_build_dev_info(struct scsi_device * sdev)4950 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4951 {
4952 struct sdebug_host_info *sdbg_host;
4953 struct sdebug_dev_info *open_devip = NULL;
4954 struct sdebug_dev_info *devip;
4955
4956 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4957 if (!sdbg_host) {
4958 pr_err("Host info NULL\n");
4959 return NULL;
4960 }
4961
4962 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4963 if ((devip->used) && (devip->channel == sdev->channel) &&
4964 (devip->target == sdev->id) &&
4965 (devip->lun == sdev->lun))
4966 return devip;
4967 else {
4968 if ((!devip->used) && (!open_devip))
4969 open_devip = devip;
4970 }
4971 }
4972 if (!open_devip) { /* try and make a new one */
4973 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4974 if (!open_devip) {
4975 pr_err("out of memory at line %d\n", __LINE__);
4976 return NULL;
4977 }
4978 }
4979
4980 open_devip->channel = sdev->channel;
4981 open_devip->target = sdev->id;
4982 open_devip->lun = sdev->lun;
4983 open_devip->sdbg_host = sdbg_host;
4984 atomic_set(&open_devip->num_in_q, 0);
4985 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4986 open_devip->used = true;
4987 return open_devip;
4988 }
4989
4990 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4991 {
4992 if (sdebug_verbose)
4993 pr_info("slave_alloc <%u %u %u %llu>\n",
4994 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4995 return 0;
4996 }
4997
4998 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4999 {
5000 struct sdebug_dev_info *devip =
5001 (struct sdebug_dev_info *)sdp->hostdata;
5002
5003 if (sdebug_verbose)
5004 pr_info("slave_configure <%u %u %u %llu>\n",
5005 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5006 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5007 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5008 if (devip == NULL) {
5009 devip = find_build_dev_info(sdp);
5010 if (devip == NULL)
5011 return 1; /* no resources, will be marked offline */
5012 }
5013 sdp->hostdata = devip;
5014 if (sdebug_no_uld)
5015 sdp->no_uld_attach = 1;
5016 config_cdb_len(sdp);
5017 return 0;
5018 }
5019
5020 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5021 {
5022 struct sdebug_dev_info *devip =
5023 (struct sdebug_dev_info *)sdp->hostdata;
5024
5025 if (sdebug_verbose)
5026 pr_info("slave_destroy <%u %u %u %llu>\n",
5027 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5028 if (devip) {
5029 /* make this slot available for re-use */
5030 devip->used = false;
5031 sdp->hostdata = NULL;
5032 }
5033 }
5034
5035 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5036 enum sdeb_defer_type defer_t)
5037 {
5038 if (!sd_dp)
5039 return;
5040 if (defer_t == SDEB_DEFER_HRT)
5041 hrtimer_cancel(&sd_dp->hrt);
5042 else if (defer_t == SDEB_DEFER_WQ)
5043 cancel_work_sync(&sd_dp->ew.work);
5044 }
5045
5046 /* If @cmnd is found, deletes its timer or work queue and returns true; else
5047 returns false */
5048 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5049 {
5050 unsigned long iflags;
5051 int j, k, qmax, r_qmax;
5052 enum sdeb_defer_type l_defer_t;
5053 struct sdebug_queue *sqp;
5054 struct sdebug_queued_cmd *sqcp;
5055 struct sdebug_dev_info *devip;
5056 struct sdebug_defer *sd_dp;
5057
5058 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5059 spin_lock_irqsave(&sqp->qc_lock, iflags);
5060 qmax = sdebug_max_queue;
5061 r_qmax = atomic_read(&retired_max_queue);
5062 if (r_qmax > qmax)
5063 qmax = r_qmax;
5064 for (k = 0; k < qmax; ++k) {
5065 if (test_bit(k, sqp->in_use_bm)) {
5066 sqcp = &sqp->qc_arr[k];
5067 if (cmnd != sqcp->a_cmnd)
5068 continue;
5069 /* found */
5070 devip = (struct sdebug_dev_info *)
5071 cmnd->device->hostdata;
5072 if (devip)
5073 atomic_dec(&devip->num_in_q);
5074 sqcp->a_cmnd = NULL;
5075 sd_dp = sqcp->sd_dp;
5076 if (sd_dp) {
5077 l_defer_t = sd_dp->defer_t;
5078 sd_dp->defer_t = SDEB_DEFER_NONE;
5079 } else
5080 l_defer_t = SDEB_DEFER_NONE;
5081 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5082 stop_qc_helper(sd_dp, l_defer_t);
5083 clear_bit(k, sqp->in_use_bm);
5084 return true;
5085 }
5086 }
5087 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5088 }
5089 return false;
5090 }
5091
5092 /* Deletes (stops) timers or work queues of all queued commands */
5093 static void stop_all_queued(void)
5094 {
5095 unsigned long iflags;
5096 int j, k;
5097 enum sdeb_defer_type l_defer_t;
5098 struct sdebug_queue *sqp;
5099 struct sdebug_queued_cmd *sqcp;
5100 struct sdebug_dev_info *devip;
5101 struct sdebug_defer *sd_dp;
5102
5103 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5104 spin_lock_irqsave(&sqp->qc_lock, iflags);
5105 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5106 if (test_bit(k, sqp->in_use_bm)) {
5107 sqcp = &sqp->qc_arr[k];
5108 if (sqcp->a_cmnd == NULL)
5109 continue;
5110 devip = (struct sdebug_dev_info *)
5111 sqcp->a_cmnd->device->hostdata;
5112 if (devip)
5113 atomic_dec(&devip->num_in_q);
5114 sqcp->a_cmnd = NULL;
5115 sd_dp = sqcp->sd_dp;
5116 if (sd_dp) {
5117 l_defer_t = sd_dp->defer_t;
5118 sd_dp->defer_t = SDEB_DEFER_NONE;
5119 } else
5120 l_defer_t = SDEB_DEFER_NONE;
5121 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5122 stop_qc_helper(sd_dp, l_defer_t);
5123 clear_bit(k, sqp->in_use_bm);
5124 spin_lock_irqsave(&sqp->qc_lock, iflags);
5125 }
5126 }
5127 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5128 }
5129 }
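/*
 * A minimal sketch (assumed pattern, not part of the original driver) of
 * why both functions above drop qc_lock before calling stop_qc_helper():
 * hrtimer_cancel() and cancel_work_sync() wait for a running completion
 * callback to finish, and that callback itself takes qc_lock, so
 * cancelling while holding the lock could deadlock.
 */
static void example_detach_then_cancel(struct sdebug_queue *sqp, int k)
{
	unsigned long iflags;
	struct sdebug_defer *sd_dp;

	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqp->qc_arr[k].a_cmnd = NULL;		/* detach while locked */
	sd_dp = sqp->qc_arr[k].sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (sd_dp)
		hrtimer_cancel(&sd_dp->hrt);	/* waits for callback; must be unlocked */
}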
5130
5131 /* Free queued command memory on heap */
5132 static void free_all_queued(void)
5133 {
5134 int j, k;
5135 struct sdebug_queue *sqp;
5136 struct sdebug_queued_cmd *sqcp;
5137
5138 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5139 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5140 sqcp = &sqp->qc_arr[k];
5141 kfree(sqcp->sd_dp);
5142 sqcp->sd_dp = NULL;
5143 }
5144 }
5145 }
5146
5147 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5148 {
5149 bool ok;
5150
5151 ++num_aborts;
5152 if (SCpnt) {
5153 ok = stop_queued_cmnd(SCpnt);
5154 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5155 sdev_printk(KERN_INFO, SCpnt->device,
5156 "%s: command%s found\n", __func__,
5157 ok ? "" : " not");
5158 }
5159 return SUCCESS;
5160 }
5161
5162 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5163 {
5164 ++num_dev_resets;
5165 if (SCpnt && SCpnt->device) {
5166 struct scsi_device *sdp = SCpnt->device;
5167 struct sdebug_dev_info *devip =
5168 (struct sdebug_dev_info *)sdp->hostdata;
5169
5170 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5171 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5172 if (devip)
5173 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5174 }
5175 return SUCCESS;
5176 }
5177
5178 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5179 {
5180 struct sdebug_host_info *sdbg_host;
5181 struct sdebug_dev_info *devip;
5182 struct scsi_device *sdp;
5183 struct Scsi_Host *hp;
5184 int k = 0;
5185
5186 ++num_target_resets;
5187 if (!SCpnt)
5188 goto lie;
5189 sdp = SCpnt->device;
5190 if (!sdp)
5191 goto lie;
5192 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5193 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5194 hp = sdp->host;
5195 if (!hp)
5196 goto lie;
5197 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5198 if (sdbg_host) {
5199 list_for_each_entry(devip,
5200 &sdbg_host->dev_info_list,
5201 dev_list)
5202 if (devip->target == sdp->id) {
5203 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5204 ++k;
5205 }
5206 }
5207 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5208 sdev_printk(KERN_INFO, sdp,
5209 "%s: %d device(s) found in target\n", __func__, k);
5210 lie:
5211 return SUCCESS;
5212 }
5213
5214 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5215 {
5216 struct sdebug_host_info *sdbg_host;
5217 struct sdebug_dev_info *devip;
5218 struct scsi_device *sdp;
5219 struct Scsi_Host *hp;
5220 int k = 0;
5221
5222 ++num_bus_resets;
5223 if (!(SCpnt && SCpnt->device))
5224 goto lie;
5225 sdp = SCpnt->device;
5226 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5227 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5228 hp = sdp->host;
5229 if (hp) {
5230 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5231 if (sdbg_host) {
5232 list_for_each_entry(devip,
5233 &sdbg_host->dev_info_list,
5234 dev_list) {
5235 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5236 ++k;
5237 }
5238 }
5239 }
5240 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5241 sdev_printk(KERN_INFO, sdp,
5242 "%s: %d device(s) found in host\n", __func__, k);
5243 lie:
5244 return SUCCESS;
5245 }
5246
5247 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5248 {
5249 struct sdebug_host_info *sdbg_host;
5250 struct sdebug_dev_info *devip;
5251 int k = 0;
5252
5253 ++num_host_resets;
5254 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5255 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5256 spin_lock(&sdebug_host_list_lock);
5257 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5258 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5259 dev_list) {
5260 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5261 ++k;
5262 }
5263 }
5264 spin_unlock(&sdebug_host_list_lock);
5265 stop_all_queued();
5266 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5267 sdev_printk(KERN_INFO, SCpnt->device,
5268 "%s: %d device(s) found\n", __func__, k);
5269 return SUCCESS;
5270 }
5271
5272 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5273 {
5274 struct msdos_partition *pp;
5275 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5276 int sectors_per_part, num_sectors, k;
5277 int heads_by_sects, start_sec, end_sec;
5278
5279 /* assume partition table already zeroed */
5280 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5281 return;
5282 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5283 sdebug_num_parts = SDEBUG_MAX_PARTS;
5284 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5285 }
5286 num_sectors = (int)get_sdebug_capacity();
5287 sectors_per_part = (num_sectors - sdebug_sectors_per)
5288 / sdebug_num_parts;
5289 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5290 starts[0] = sdebug_sectors_per;
5291 max_part_secs = sectors_per_part;
5292 for (k = 1; k < sdebug_num_parts; ++k) {
5293 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5294 * heads_by_sects;
5295 if (starts[k] - starts[k - 1] < max_part_secs)
5296 max_part_secs = starts[k] - starts[k - 1];
5297 }
5298 starts[sdebug_num_parts] = num_sectors;
5299 starts[sdebug_num_parts + 1] = 0;
5300
5301 ramp[510] = 0x55; /* magic partition markings */
5302 ramp[511] = 0xAA;
5303 pp = (struct msdos_partition *)(ramp + 0x1be);
5304 for (k = 0; starts[k + 1]; ++k, ++pp) {
5305 start_sec = starts[k];
5306 end_sec = starts[k] + max_part_secs - 1;
5307 pp->boot_ind = 0;
5308
5309 pp->cyl = start_sec / heads_by_sects;
5310 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5311 / sdebug_sectors_per;
5312 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5313
5314 pp->end_cyl = end_sec / heads_by_sects;
5315 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5316 / sdebug_sectors_per;
5317 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5318
5319 pp->start_sect = cpu_to_le32(start_sec);
5320 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5321 pp->sys_ind = 0x83; /* plain Linux partition */
5322 }
5323 }
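/*
 * Worked example of the CHS arithmetic above (illustrative geometry):
 * with sdebug_heads = 8 and sdebug_sectors_per = 32, heads_by_sects is
 * 256. For start_sec = 544: cyl = 544 / 256 = 2; head = (544 - 512) / 32
 * = 1; sector = (544 % 32) + 1 = 1, i.e. CHS (2, 1, 1).
 */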
5324
5325 static void block_unblock_all_queues(bool block)
5326 {
5327 int j;
5328 struct sdebug_queue *sqp;
5329
5330 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5331 atomic_set(&sqp->blocked, (int)block);
5332 }
5333
5334 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5335 * commands will be processed normally before triggers occur.
5336 */
5337 static void tweak_cmnd_count(void)
5338 {
5339 int count, modulo;
5340
5341 modulo = abs(sdebug_every_nth);
5342 if (modulo < 2)
5343 return;
5344 block_unblock_all_queues(true);
5345 count = atomic_read(&sdebug_cmnd_count);
5346 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5347 block_unblock_all_queues(false);
5348 }
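/*
 * Worked example (illustrative): with every_nth = 100 and a current
 * sdebug_cmnd_count of 347, the count is rounded down to 300, so 99 more
 * commands (abs(every_nth) - 1) complete normally before the counter
 * next hits a multiple of 100 and a trigger fires.
 */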
5349
5350 static void clear_queue_stats(void)
5351 {
5352 atomic_set(&sdebug_cmnd_count, 0);
5353 atomic_set(&sdebug_completions, 0);
5354 atomic_set(&sdebug_miss_cpus, 0);
5355 atomic_set(&sdebug_a_tsf, 0);
5356 }
5357
5358 static bool inject_on_this_cmd(void)
5359 {
5360 if (sdebug_every_nth == 0)
5361 return false;
5362 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5363 }
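/*
 * Example (illustrative): with every_nth = 100 (or -100, since abs() is
 * taken here), inject_on_this_cmd() returns true whenever the running
 * command count is a multiple of 100.
 */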
5364
5365 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5366
5367 /* Complete the processing of the thread that queued a SCSI command to this
5368 * driver. It either completes the command by calling scsi_done() or
5369 * schedules an hrtimer or work queue item and then returns 0. Returns
5370 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5371 */
5372 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5373 int scsi_result,
5374 int (*pfp)(struct scsi_cmnd *,
5375 struct sdebug_dev_info *),
5376 int delta_jiff, int ndelay)
5377 {
5378 bool new_sd_dp;
5379 bool inject = false;
5380 int k, num_in_q, qdepth;
5381 unsigned long iflags;
5382 u64 ns_from_boot = 0;
5383 struct sdebug_queue *sqp;
5384 struct sdebug_queued_cmd *sqcp;
5385 struct scsi_device *sdp;
5386 struct sdebug_defer *sd_dp;
5387
5388 if (unlikely(devip == NULL)) {
5389 if (scsi_result == 0)
5390 scsi_result = DID_NO_CONNECT << 16;
5391 goto respond_in_thread;
5392 }
5393 sdp = cmnd->device;
5394
5395 if (delta_jiff == 0)
5396 goto respond_in_thread;
5397
5398 sqp = get_queue(cmnd);
5399 spin_lock_irqsave(&sqp->qc_lock, iflags);
5400 if (unlikely(atomic_read(&sqp->blocked))) {
5401 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5402 return SCSI_MLQUEUE_HOST_BUSY;
5403 }
5404 num_in_q = atomic_read(&devip->num_in_q);
5405 qdepth = cmnd->device->queue_depth;
5406 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5407 if (scsi_result) {
5408 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5409 goto respond_in_thread;
5410 } else
5411 scsi_result = device_qfull_result;
5412 } else if (unlikely(sdebug_every_nth &&
5413 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5414 (scsi_result == 0))) {
5415 if ((num_in_q == (qdepth - 1)) &&
5416 (atomic_inc_return(&sdebug_a_tsf) >=
5417 abs(sdebug_every_nth))) {
5418 atomic_set(&sdebug_a_tsf, 0);
5419 inject = true;
5420 scsi_result = device_qfull_result;
5421 }
5422 }
5423
5424 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5425 if (unlikely(k >= sdebug_max_queue)) {
5426 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5427 if (scsi_result)
5428 goto respond_in_thread;
5429 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5430 scsi_result = device_qfull_result;
5431 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5432 sdev_printk(KERN_INFO, sdp,
5433 "%s: max_queue=%d exceeded, %s\n",
5434 __func__, sdebug_max_queue,
5435 (scsi_result ? "status: TASK SET FULL" :
5436 "report: host busy"));
5437 if (scsi_result)
5438 goto respond_in_thread;
5439 else
5440 return SCSI_MLQUEUE_HOST_BUSY;
5441 }
5442 set_bit(k, sqp->in_use_bm);
5443 atomic_inc(&devip->num_in_q);
5444 sqcp = &sqp->qc_arr[k];
5445 sqcp->a_cmnd = cmnd;
5446 cmnd->host_scribble = (unsigned char *)sqcp;
5447 sd_dp = sqcp->sd_dp;
5448 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5449 if (!sd_dp) {
5450 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5451 if (!sd_dp) {
5452 atomic_dec(&devip->num_in_q);
5453 clear_bit(k, sqp->in_use_bm);
5454 return SCSI_MLQUEUE_HOST_BUSY;
5455 }
5456 new_sd_dp = true;
5457 } else {
5458 new_sd_dp = false;
5459 }
5460
5461 /* Set the hostwide tag */
5462 if (sdebug_host_max_queue)
5463 sd_dp->hc_idx = get_tag(cmnd);
5464
5465 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5466 ns_from_boot = ktime_get_boottime_ns();
5467
5468 /* one of the resp_*() response functions is called here */
5469 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5470 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5471 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5472 delta_jiff = ndelay = 0;
5473 }
5474 if (cmnd->result == 0 && scsi_result != 0)
5475 cmnd->result = scsi_result;
5476 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5477 if (atomic_read(&sdeb_inject_pending)) {
5478 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5479 atomic_set(&sdeb_inject_pending, 0);
5480 cmnd->result = check_condition_result;
5481 }
5482 }
5483
5484 if (unlikely(sdebug_verbose && cmnd->result))
5485 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5486 __func__, cmnd->result);
5487
5488 if (delta_jiff > 0 || ndelay > 0) {
5489 ktime_t kt;
5490
5491 if (delta_jiff > 0) {
5492 u64 ns = jiffies_to_nsecs(delta_jiff);
5493
5494 if (sdebug_random && ns < U32_MAX) {
5495 ns = prandom_u32_max((u32)ns);
5496 } else if (sdebug_random) {
5497 ns >>= 12; /* scale to 4 usec precision */
5498 if (ns < U32_MAX) /* over 4 hours max */
5499 ns = prandom_u32_max((u32)ns);
5500 ns <<= 12;
5501 }
5502 kt = ns_to_ktime(ns);
5503 } else { /* ndelay has a 4.2 second max */
5504 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5505 (u32)ndelay;
5506 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5507 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5508
5509 if (kt <= d) { /* elapsed duration >= kt */
5510 spin_lock_irqsave(&sqp->qc_lock, iflags);
5511 sqcp->a_cmnd = NULL;
5512 atomic_dec(&devip->num_in_q);
5513 clear_bit(k, sqp->in_use_bm);
5514 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5515 if (new_sd_dp)
5516 kfree(sd_dp);
5517 /* call scsi_done() from this thread */
5518 cmnd->scsi_done(cmnd);
5519 return 0;
5520 }
5521 /* otherwise reduce kt by elapsed time */
5522 kt -= d;
5523 }
5524 }
5525 if (!sd_dp->init_hrt) {
5526 sd_dp->init_hrt = true;
5527 sqcp->sd_dp = sd_dp;
5528 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5529 HRTIMER_MODE_REL_PINNED);
5530 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5531 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5532 sd_dp->qc_idx = k;
5533 }
5534 if (sdebug_statistics)
5535 sd_dp->issuing_cpu = raw_smp_processor_id();
5536 sd_dp->defer_t = SDEB_DEFER_HRT;
5537 /* schedule the invocation of scsi_done() for a later time */
5538 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5539 } else { /* jdelay < 0, use work queue */
5540 if (!sd_dp->init_wq) {
5541 sd_dp->init_wq = true;
5542 sqcp->sd_dp = sd_dp;
5543 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5544 sd_dp->qc_idx = k;
5545 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5546 }
5547 if (sdebug_statistics)
5548 sd_dp->issuing_cpu = raw_smp_processor_id();
5549 sd_dp->defer_t = SDEB_DEFER_WQ;
5550 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5551 atomic_read(&sdeb_inject_pending)))
5552 sd_dp->aborted = true;
5553 schedule_work(&sd_dp->ew.work);
5554 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5555 atomic_read(&sdeb_inject_pending))) {
5556 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5557 blk_abort_request(cmnd->request);
5558 atomic_set(&sdeb_inject_pending, 0);
5559 }
5560 }
5561 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5562 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5563 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5564 return 0;
5565
5566 respond_in_thread: /* call back to mid-layer using invocation thread */
5567 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5568 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5569 if (cmnd->result == 0 && scsi_result != 0)
5570 cmnd->result = scsi_result;
5571 cmnd->scsi_done(cmnd);
5572 return 0;
5573 }
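/*
 * Worked example of the random delay scaling in schedule_resp() above
 * (illustrative, HZ assumed to be 250): delay=2500 jiffies is 10^10 ns,
 * which exceeds U32_MAX, so ns is shifted right by 12 (about 4.1 us
 * granularity), randomized with prandom_u32_max(), then shifted back.
 * The largest representable delay is therefore U32_MAX << 12 ns, roughly
 * 4.9 hours, matching the "over 4 hours max" note.
 */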
5574
5575 /* Note: The following macros create attribute files in the
5576 /sys/module/scsi_debug/parameters directory. Unfortunately this
5577 driver is unaware of a change and cannot trigger auxiliary actions
5578 as it can when the corresponding attribute in the
5579 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5580 */
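/*
 * Example usage (hypothetical values): both writes below set sdebug_opts
 * to 1, but only the second runs opts_store() and so triggers the
 * auxiliary actions (tweak_cmnd_count() etc.):
 *
 *   echo 1 > /sys/module/scsi_debug/parameters/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */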
5581 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5582 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5583 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5584 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5585 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5586 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5587 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5588 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5589 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5590 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5591 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5592 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5593 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5594 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5595 module_param_string(inq_product, sdebug_inq_product_id,
5596 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5597 module_param_string(inq_rev, sdebug_inq_product_rev,
5598 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5599 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5600 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5601 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5602 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5603 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5604 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5605 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5606 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5607 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5608 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5609 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5610 S_IRUGO | S_IWUSR);
5611 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5612 S_IRUGO | S_IWUSR);
5613 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5614 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5615 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5616 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5617 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5618 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5619 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5620 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5621 module_param_named(per_host_store, sdebug_per_host_store, bool,
5622 S_IRUGO | S_IWUSR);
5623 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5624 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5625 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5626 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5627 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5628 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5629 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5630 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5631 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5632 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5633 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5634 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5635 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5636 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5637 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5638 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5639 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5640 S_IRUGO | S_IWUSR);
5641 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5642 module_param_named(write_same_length, sdebug_write_same_length, int,
5643 S_IRUGO | S_IWUSR);
5644 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5645 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5646 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5647 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5648
5649 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5650 MODULE_DESCRIPTION("SCSI debug adapter driver");
5651 MODULE_LICENSE("GPL");
5652 MODULE_VERSION(SDEBUG_VERSION);
5653
5654 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5655 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5656 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5657 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5658 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5659 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5660 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5661 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5662 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5663 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5664 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5665 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5666 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5667 MODULE_PARM_DESC(host_max_queue,
5668 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5669 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5670 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5671 SDEBUG_VERSION "\")");
5672 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5673 MODULE_PARM_DESC(lbprz,
5674 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5675 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5676 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5677 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5678 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5679 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5680 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5681 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5682 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors that return MEDIUM error");
5683 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5684 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5685 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5686 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5687 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5688 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5689 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5690 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5691 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5692 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5693 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5694 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5695 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5696 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5697 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5698 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5699 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5700 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5701 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5702 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5703 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5704 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5705 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5706 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5707 MODULE_PARM_DESC(uuid_ctl,
5708 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5709 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5710 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5711 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5712 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5713 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5714 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5715 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5716 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
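/*
 * Example invocation combining the zoned-block parameters above
 * (illustrative values):
 *
 *   modprobe scsi_debug zbc=managed dev_size_mb=1024 zone_size_mb=128 \
 *            zone_nr_conv=4
 *
 * yields a host-managed disk with 8 zones of 128 MiB each, the first 4
 * of them conventional.
 */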
5717
5718 #define SDEBUG_INFO_LEN 256
5719 static char sdebug_info[SDEBUG_INFO_LEN];
5720
5721 static const char *scsi_debug_info(struct Scsi_Host *shp)
5722 {
5723 int k;
5724
5725 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5726 my_name, SDEBUG_VERSION, sdebug_version_date);
5727 if (k >= (SDEBUG_INFO_LEN - 1))
5728 return sdebug_info;
5729 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5730 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5731 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5732 "statistics", (int)sdebug_statistics);
5733 return sdebug_info;
5734 }
5735
5736 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5737 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5738 int length)
5739 {
5740 char arr[16];
5741 int opts;
5742 int minLen = length > 15 ? 15 : length;
5743
5744 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5745 return -EACCES;
5746 memcpy(arr, buffer, minLen);
5747 arr[minLen] = '\0';
5748 if (1 != sscanf(arr, "%d", &opts))
5749 return -EINVAL;
5750 sdebug_opts = opts;
5751 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5752 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5753 if (sdebug_every_nth != 0)
5754 tweak_cmnd_count();
5755 return length;
5756 }
5757
5758 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5759 * same for each scsi_debug host (if more than one). Some of the counters
5760 * shown are not atomic, so they may be inaccurate on a busy system. */
5761 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5762 {
5763 int f, j, l;
5764 struct sdebug_queue *sqp;
5765 struct sdebug_host_info *sdhp;
5766
5767 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5768 SDEBUG_VERSION, sdebug_version_date);
5769 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5770 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5771 sdebug_opts, sdebug_every_nth);
5772 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5773 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5774 sdebug_sector_size, "bytes");
5775 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5776 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5777 num_aborts);
5778 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5779 num_dev_resets, num_target_resets, num_bus_resets,
5780 num_host_resets);
5781 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5782 dix_reads, dix_writes, dif_errors);
5783 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5784 sdebug_statistics);
5785 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5786 atomic_read(&sdebug_cmnd_count),
5787 atomic_read(&sdebug_completions),
5788 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5789 atomic_read(&sdebug_a_tsf));
5790
5791 seq_printf(m, "submit_queues=%d\n", submit_queues);
5792 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5793 seq_printf(m, " queue %d:\n", j);
5794 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5795 if (f != sdebug_max_queue) {
5796 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5797 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5798 "first,last bits", f, l);
5799 }
5800 }
5801
5802 seq_printf(m, "this host_no=%d\n", host->host_no);
5803 if (!xa_empty(per_store_ap)) {
5804 bool niu;
5805 int idx;
5806 unsigned long l_idx;
5807 struct sdeb_store_info *sip;
5808
5809 seq_puts(m, "\nhost list:\n");
5810 j = 0;
5811 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5812 idx = sdhp->si_idx;
5813 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5814 sdhp->shost->host_no, idx);
5815 ++j;
5816 }
5817 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5818 sdeb_most_recent_idx);
5819 j = 0;
5820 xa_for_each(per_store_ap, l_idx, sip) {
5821 niu = xa_get_mark(per_store_ap, l_idx,
5822 SDEB_XA_NOT_IN_USE);
5823 idx = (int)l_idx;
5824 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5825 (niu ? " not_in_use" : ""));
5826 ++j;
5827 }
5828 }
5829 return 0;
5830 }
5831
5832 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5833 {
5834 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5835 }
5836 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5837 * of delay is jiffies.
5838 */
5839 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5840 size_t count)
5841 {
5842 int jdelay, res;
5843
5844 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5845 res = count;
5846 if (sdebug_jdelay != jdelay) {
5847 int j, k;
5848 struct sdebug_queue *sqp;
5849
5850 block_unblock_all_queues(true);
5851 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5852 ++j, ++sqp) {
5853 k = find_first_bit(sqp->in_use_bm,
5854 sdebug_max_queue);
5855 if (k != sdebug_max_queue) {
5856 res = -EBUSY; /* queued commands */
5857 break;
5858 }
5859 }
5860 if (res > 0) {
5861 sdebug_jdelay = jdelay;
5862 sdebug_ndelay = 0;
5863 }
5864 block_unblock_all_queues(false);
5865 }
5866 return res;
5867 }
5868 return -EINVAL;
5869 }
5870 static DRIVER_ATTR_RW(delay);
5871
5872 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5873 {
5874 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5875 }
5876 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5877 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5878 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5879 size_t count)
5880 {
5881 int ndelay, res;
5882
5883 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5884 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5885 res = count;
5886 if (sdebug_ndelay != ndelay) {
5887 int j, k;
5888 struct sdebug_queue *sqp;
5889
5890 block_unblock_all_queues(true);
5891 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5892 ++j, ++sqp) {
5893 k = find_first_bit(sqp->in_use_bm,
5894 sdebug_max_queue);
5895 if (k != sdebug_max_queue) {
5896 res = -EBUSY; /* queued commands */
5897 break;
5898 }
5899 }
5900 if (res > 0) {
5901 sdebug_ndelay = ndelay;
5902 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5903 : DEF_JDELAY;
5904 }
5905 block_unblock_all_queues(false);
5906 }
5907 return res;
5908 }
5909 return -EINVAL;
5910 }
5911 static DRIVER_ATTR_RW(ndelay);
5912
5913 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5914 {
5915 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5916 }
5917
5918 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5919 size_t count)
5920 {
5921 int opts;
5922 char work[20];
5923
5924 if (sscanf(buf, "%10s", work) == 1) {
5925 if (strncasecmp(work, "0x", 2) == 0) {
5926 if (kstrtoint(work + 2, 16, &opts) == 0)
5927 goto opts_done;
5928 } else {
5929 if (kstrtoint(work, 10, &opts) == 0)
5930 goto opts_done;
5931 }
5932 }
5933 return -EINVAL;
5934 opts_done:
5935 sdebug_opts = opts;
5936 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5937 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5938 tweak_cmnd_count();
5939 return count;
5940 }
5941 static DRIVER_ATTR_RW(opts);
5942
5943 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5944 {
5945 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5946 }
5947 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5948 size_t count)
5949 {
5950 int n;
5951
5952 /* Cannot change from or to TYPE_ZBC with sysfs */
5953 if (sdebug_ptype == TYPE_ZBC)
5954 return -EINVAL;
5955
5956 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5957 if (n == TYPE_ZBC)
5958 return -EINVAL;
5959 sdebug_ptype = n;
5960 return count;
5961 }
5962 return -EINVAL;
5963 }
5964 static DRIVER_ATTR_RW(ptype);
5965
5966 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5967 {
5968 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5969 }
5970 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5971 size_t count)
5972 {
5973 int n;
5974
5975 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5976 sdebug_dsense = n;
5977 return count;
5978 }
5979 return -EINVAL;
5980 }
5981 static DRIVER_ATTR_RW(dsense);
5982
5983 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5984 {
5985 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5986 }
5987 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5988 size_t count)
5989 {
5990 int n, idx;
5991
5992 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5993 bool want_store = (n == 0);
5994 struct sdebug_host_info *sdhp;
5995
5996 n = (n > 0);
5997 sdebug_fake_rw = (sdebug_fake_rw > 0);
5998 if (sdebug_fake_rw == n)
5999 return count; /* not transitioning so do nothing */
6000
6001 if (want_store) { /* 1 --> 0 transition, set up store */
6002 if (sdeb_first_idx < 0) {
6003 idx = sdebug_add_store();
6004 if (idx < 0)
6005 return idx;
6006 } else {
6007 idx = sdeb_first_idx;
6008 xa_clear_mark(per_store_ap, idx,
6009 SDEB_XA_NOT_IN_USE);
6010 }
6011 /* make all hosts use same store */
6012 list_for_each_entry(sdhp, &sdebug_host_list,
6013 host_list) {
6014 if (sdhp->si_idx != idx) {
6015 xa_set_mark(per_store_ap, sdhp->si_idx,
6016 SDEB_XA_NOT_IN_USE);
6017 sdhp->si_idx = idx;
6018 }
6019 }
6020 sdeb_most_recent_idx = idx;
6021 } else { /* 0 --> 1 transition is trigger for shrink */
6022 sdebug_erase_all_stores(true /* apart from first */);
6023 }
6024 sdebug_fake_rw = n;
6025 return count;
6026 }
6027 return -EINVAL;
6028 }
6029 static DRIVER_ATTR_RW(fake_rw);
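/*
 * Example (illustrative): with fake_rw=1 no backing store exists.
 * Writing 0 to this attribute creates a store (or reclaims the first
 * one) and points every host's si_idx at it; writing 1 again erases all
 * stores apart from the first, which is kept for later re-use.
 */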
6030
6031 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6032 {
6033 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6034 }
6035 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6036 size_t count)
6037 {
6038 int n;
6039
6040 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6041 sdebug_no_lun_0 = n;
6042 return count;
6043 }
6044 return -EINVAL;
6045 }
6046 static DRIVER_ATTR_RW(no_lun_0);
6047
6048 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6049 {
6050 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6051 }
6052 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6053 size_t count)
6054 {
6055 int n;
6056
6057 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6058 sdebug_num_tgts = n;
6059 sdebug_max_tgts_luns();
6060 return count;
6061 }
6062 return -EINVAL;
6063 }
6064 static DRIVER_ATTR_RW(num_tgts);
6065
6066 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6067 {
6068 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6069 }
6070 static DRIVER_ATTR_RO(dev_size_mb);
6071
6072 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6073 {
6074 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6075 }
6076
6077 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6078 size_t count)
6079 {
6080 bool v;
6081
6082 if (kstrtobool(buf, &v))
6083 return -EINVAL;
6084
6085 sdebug_per_host_store = v;
6086 return count;
6087 }
6088 static DRIVER_ATTR_RW(per_host_store);
6089
6090 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6091 {
6092 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6093 }
6094 static DRIVER_ATTR_RO(num_parts);
6095
6096 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6097 {
6098 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6099 }
6100 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6101 size_t count)
6102 {
6103 int nth;
6104 char work[20];
6105
6106 if (sscanf(buf, "%10s", work) == 1) {
6107 if (strncasecmp(work, "0x", 2) == 0) {
6108 if (kstrtoint(work + 2, 16, &nth) == 0)
6109 goto every_nth_done;
6110 } else {
6111 if (kstrtoint(work, 10, &nth) == 0)
6112 goto every_nth_done;
6113 }
6114 }
6115 return -EINVAL;
6116
6117 every_nth_done:
6118 sdebug_every_nth = nth;
6119 if (nth && !sdebug_statistics) {
6120 pr_info("every_nth needs statistics=1, set it\n");
6121 sdebug_statistics = true;
6122 }
6123 tweak_cmnd_count();
6124 return count;
6125 }
6126 static DRIVER_ATTR_RW(every_nth);
6127
6128 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6129 {
6130 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6131 }
6132 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6133 size_t count)
6134 {
6135 int n;
6136 bool changed;
6137
6138 if (kstrtoint(buf, 0, &n))
6139 return -EINVAL;
6140 if (n >= 0) {
6141 if (n > (int)SAM_LUN_AM_FLAT) {
6142 pr_warn("only LUN address methods 0 and 1 are supported\n");
6143 return -EINVAL;
6144 }
6145 changed = ((int)sdebug_lun_am != n);
6146 sdebug_lun_am = n;
6147 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6148 struct sdebug_host_info *sdhp;
6149 struct sdebug_dev_info *dp;
6150
6151 spin_lock(&sdebug_host_list_lock);
6152 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6153 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6154 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6155 }
6156 }
6157 spin_unlock(&sdebug_host_list_lock);
6158 }
6159 return count;
6160 }
6161 return -EINVAL;
6162 }
6163 static DRIVER_ATTR_RW(lun_format);
6164
6165 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6166 {
6167 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6168 }
6169 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6170 size_t count)
6171 {
6172 int n;
6173 bool changed;
6174
6175 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6176 if (n > 256) {
6177 pr_warn("max_luns can be no more than 256\n");
6178 return -EINVAL;
6179 }
6180 changed = (sdebug_max_luns != n);
6181 sdebug_max_luns = n;
6182 sdebug_max_tgts_luns();
6183 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6184 struct sdebug_host_info *sdhp;
6185 struct sdebug_dev_info *dp;
6186
6187 spin_lock(&sdebug_host_list_lock);
6188 list_for_each_entry(sdhp, &sdebug_host_list,
6189 host_list) {
6190 list_for_each_entry(dp, &sdhp->dev_info_list,
6191 dev_list) {
6192 set_bit(SDEBUG_UA_LUNS_CHANGED,
6193 dp->uas_bm);
6194 }
6195 }
6196 spin_unlock(&sdebug_host_list_lock);
6197 }
6198 return count;
6199 }
6200 return -EINVAL;
6201 }
6202 static DRIVER_ATTR_RW(max_luns);
6203
6204 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6205 {
6206 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6207 }
6208 /* N.B. max_queue can be changed while there are queued commands. In flight
6209 * commands beyond the new max_queue will be completed. */
6210 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6211 size_t count)
6212 {
6213 int j, n, k, a;
6214 struct sdebug_queue *sqp;
6215
6216 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6217 (n <= SDEBUG_CANQUEUE) &&
6218 (sdebug_host_max_queue == 0)) {
6219 block_unblock_all_queues(true);
6220 k = 0;
6221 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6222 ++j, ++sqp) {
6223 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6224 if (a > k)
6225 k = a;
6226 }
6227 sdebug_max_queue = n;
6228 if (k == SDEBUG_CANQUEUE)
6229 atomic_set(&retired_max_queue, 0);
6230 else if (k >= n)
6231 atomic_set(&retired_max_queue, k + 1);
6232 else
6233 atomic_set(&retired_max_queue, 0);
6234 block_unblock_all_queues(false);
6235 return count;
6236 }
6237 return -EINVAL;
6238 }
6239 static DRIVER_ATTR_RW(max_queue);
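/*
 * Worked example (illustrative slot numbers): if max_queue is lowered to
 * 64 while slot 100 is still in flight, k becomes 100 >= n, so
 * retired_max_queue is set to 101; stop_queued_cmnd() then keeps
 * scanning slots up to 100 until the in-flight commands drain.
 */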
6240
6241 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6242 {
6243 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6244 }
6245
6246 /*
6247 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6248 * in range [0, sdebug_host_max_queue), we can't change it.
6249 */
6250 static DRIVER_ATTR_RO(host_max_queue);
6251
6252 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6253 {
6254 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6255 }
6256 static DRIVER_ATTR_RO(no_uld);
6257
6258 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6259 {
6260 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6261 }
6262 static DRIVER_ATTR_RO(scsi_level);
6263
6264 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6265 {
6266 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6267 }
6268 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6269 size_t count)
6270 {
6271 int n;
6272 bool changed;
6273
6274 /* Ignore capacity change for ZBC drives for now */
6275 if (sdeb_zbc_in_use)
6276 return -ENOTSUPP;
6277
6278 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6279 changed = (sdebug_virtual_gb != n);
6280 sdebug_virtual_gb = n;
6281 sdebug_capacity = get_sdebug_capacity();
6282 if (changed) {
6283 struct sdebug_host_info *sdhp;
6284 struct sdebug_dev_info *dp;
6285
6286 spin_lock(&sdebug_host_list_lock);
6287 list_for_each_entry(sdhp, &sdebug_host_list,
6288 host_list) {
6289 list_for_each_entry(dp, &sdhp->dev_info_list,
6290 dev_list) {
6291 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6292 dp->uas_bm);
6293 }
6294 }
6295 spin_unlock(&sdebug_host_list_lock);
6296 }
6297 return count;
6298 }
6299 return -EINVAL;
6300 }
6301 static DRIVER_ATTR_RW(virtual_gb);
6302
6303 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6304 {
6305 /* absolute number of hosts currently active is what is shown */
6306 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6307 }
6308
6309 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6310 size_t count)
6311 {
6312 bool found;
6313 unsigned long idx;
6314 struct sdeb_store_info *sip;
6315 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6316 int delta_hosts;
6317
6318 if (sscanf(buf, "%d", &delta_hosts) != 1)
6319 return -EINVAL;
6320 if (delta_hosts > 0) {
6321 do {
6322 found = false;
6323 if (want_phs) {
6324 xa_for_each_marked(per_store_ap, idx, sip,
6325 SDEB_XA_NOT_IN_USE) {
6326 sdeb_most_recent_idx = (int)idx;
6327 found = true;
6328 break;
6329 }
6330 if (found) /* re-use case */
6331 sdebug_add_host_helper((int)idx);
6332 else
6333 sdebug_do_add_host(true);
6334 } else {
6335 sdebug_do_add_host(false);
6336 }
6337 } while (--delta_hosts);
6338 } else if (delta_hosts < 0) {
6339 do {
6340 sdebug_do_remove_host(false);
6341 } while (++delta_hosts);
6342 }
6343 return count;
6344 }
6345 static DRIVER_ATTR_RW(add_host);
6346
6347 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6348 {
6349 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6350 }
6351 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6352 size_t count)
6353 {
6354 int n;
6355
6356 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6357 sdebug_vpd_use_hostno = n;
6358 return count;
6359 }
6360 return -EINVAL;
6361 }
6362 static DRIVER_ATTR_RW(vpd_use_hostno);
6363
6364 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6365 {
6366 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6367 }
6368 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6369 size_t count)
6370 {
6371 int n;
6372
6373 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6374 if (n > 0)
6375 sdebug_statistics = true;
6376 else {
6377 clear_queue_stats();
6378 sdebug_statistics = false;
6379 }
6380 return count;
6381 }
6382 return -EINVAL;
6383 }
6384 static DRIVER_ATTR_RW(statistics);
6385
6386 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6387 {
6388 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6389 }
6390 static DRIVER_ATTR_RO(sector_size);
6391
6392 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6393 {
6394 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6395 }
6396 static DRIVER_ATTR_RO(submit_queues);
6397
6398 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6399 {
6400 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6401 }
6402 static DRIVER_ATTR_RO(dix);
6403
6404 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6405 {
6406 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6407 }
6408 static DRIVER_ATTR_RO(dif);
6409
6410 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6411 {
6412 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6413 }
6414 static DRIVER_ATTR_RO(guard);
6415
6416 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6417 {
6418 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6419 }
6420 static DRIVER_ATTR_RO(ato);
6421
6422 static ssize_t map_show(struct device_driver *ddp, char *buf)
6423 {
6424 ssize_t count = 0;
6425
6426 if (!scsi_debug_lbp())
6427 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6428 sdebug_store_sectors);
6429
6430 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6431 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6432
6433 if (sip)
6434 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6435 (int)map_size, sip->map_storep);
6436 }
6437 buf[count++] = '\n';
6438 buf[count] = '\0';
6439
6440 return count;
6441 }
6442 static DRIVER_ATTR_RO(map);
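/*
 * Example output (illustrative): "%*pbl" renders the provisioning map as
 * a bitmap range list, so a store whose first four chunks and ninth
 * chunk are mapped reads back as "0-3,8".
 */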
6443
6444 static ssize_t random_show(struct device_driver *ddp, char *buf)
6445 {
6446 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6447 }
6448
6449 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6450 size_t count)
6451 {
6452 bool v;
6453
6454 if (kstrtobool(buf, &v))
6455 return -EINVAL;
6456
6457 sdebug_random = v;
6458 return count;
6459 }
6460 static DRIVER_ATTR_RW(random);
6461
6462 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6463 {
6464 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6465 }
6466 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6467 size_t count)
6468 {
6469 int n;
6470
6471 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6472 sdebug_removable = (n > 0);
6473 return count;
6474 }
6475 return -EINVAL;
6476 }
6477 static DRIVER_ATTR_RW(removable);
6478
6479 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6480 {
6481 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6482 }
6483 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6484 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6485 size_t count)
6486 {
6487 int n;
6488
6489 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6490 sdebug_host_lock = (n > 0);
6491 return count;
6492 }
6493 return -EINVAL;
6494 }
6495 static DRIVER_ATTR_RW(host_lock);
6496
6497 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6498 {
6499 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6500 }
6501 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6502 size_t count)
6503 {
6504 int n;
6505
6506 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6507 sdebug_strict = (n > 0);
6508 return count;
6509 }
6510 return -EINVAL;
6511 }
6512 static DRIVER_ATTR_RW(strict);
6513
6514 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6515 {
6516 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6517 }
6518 static DRIVER_ATTR_RO(uuid_ctl);
6519
6520 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6521 {
6522 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6523 }
6524 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6525 size_t count)
6526 {
6527 int ret, n;
6528
6529 ret = kstrtoint(buf, 0, &n);
6530 if (ret)
6531 return ret;
6532 sdebug_cdb_len = n;
6533 all_config_cdb_len();
6534 return count;
6535 }
6536 static DRIVER_ATTR_RW(cdb_len);
6537
6538 static const char * const zbc_model_strs_a[] = {
6539 [BLK_ZONED_NONE] = "none",
6540 [BLK_ZONED_HA] = "host-aware",
6541 [BLK_ZONED_HM] = "host-managed",
6542 };
6543
6544 static const char * const zbc_model_strs_b[] = {
6545 [BLK_ZONED_NONE] = "no",
6546 [BLK_ZONED_HA] = "aware",
6547 [BLK_ZONED_HM] = "managed",
6548 };
6549
6550 static const char * const zbc_model_strs_c[] = {
6551 [BLK_ZONED_NONE] = "0",
6552 [BLK_ZONED_HA] = "1",
6553 [BLK_ZONED_HM] = "2",
6554 };
6555
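/*
 * Map a user supplied model string to a BLK_ZONED_* value. Any of the
 * three spellings above is accepted, e.g. "host-managed", "managed" and
 * "2" all select BLK_ZONED_HM; -EINVAL is returned when nothing matches.
 */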
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
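/*
 * For example (assuming the module is loaded), writing
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * builds an additional host via add_host_store(), whereas writing the
 * file of the same name under /sys/module/scsi_debug/parameters would
 * merely update the variable.
 */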

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, using default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0, %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * Check for a host-managed zoned block device, specified either
	 * with ptype=0x14 or with zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
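	/*
	 * Example (with default settings): a 2 GB store has 4194304
	 * 512-byte sectors; 4194304 / (32 * 64) = 2048 cylinders, which
	 * is >= 1024, so the 255 head / 63 sectors-per-track fallback
	 * above is used.
	 */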
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

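	/*
	 * The first host is attached to the store created above (when
	 * want_store); later hosts only get a store of their own when
	 * per_host_store is also set.
	 */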
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid; if sip is NULL then it is looked up using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* apart_from_first==false is assumed only in the shutdown case */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns the new store xarray element's index (idx) if >= 0, else a
 * negated errno. The number of stores is limited to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
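	/*
	 * Mark the store as no longer in use only when no other remaining
	 * host still references the same store index.
	 */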
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow the queue depth to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}

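/*
 * With a non-zero every_nth, every |every_nth|th command is a candidate
 * for fault injection: SDEBUG_OPT_TIMEOUT silently drops any such
 * command, while SDEBUG_OPT_MAC_TIMEOUT only drops medium access
 * commands (e.g. reads and writes).
 */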
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
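/*
 * While stopped_state == 2 the unit is transitioning to ready until
 * tur_ms_to_ready milliseconds have elapsed since device creation. A
 * TEST UNIT READY during that window gets ASCQ 0x1 ("in process of
 * becoming ready") plus the remaining time in the sense-data
 * information field; any other caller ends up with ASCQ 0x2
 * ("initializing command required").
 */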
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
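		/*
		 * len_mask[0] holds the cdb length; a set bit in
		 * len_mask[k] marks a cdb bit that may legitimately be
		 * non-zero. Any leftover set bit is reported in the sense
		 * data by its byte (k) and bit (j) position.
		 */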
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
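		 * For example, sdebug_jdelay=5 with F_SYNC_DELAY maps to
		 * 5 * HZ / 20 jiffies below, i.e. roughly 250 ms.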
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell the SCSI subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};