/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_CMND_H
#define _SCSI_SCSI_CMND_H

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/t10-pi.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>
#include <linux/android_kabi.h>

struct Scsi_Host;
struct scsi_driver;

/*
 * MAX_COMMAND_SIZE is:
 * The longest fixed-length SCSI CDB as per the SCSI standard.
 * Fixed-length means commands whose size can be determined from their
 * opcode alone, because the CDB does not carry a length specifier
 * (unlike the VARIABLE_LENGTH_CMD(0x7f) command). Strictly speaking this
 * is not exact: the SCSI standard also defines extended and
 * vendor-specific commands that can be bigger than 16 bytes. The kernel
 * supports these using the same infrastructure used for VARLEN CDBs.
 * So in effect MAX_COMMAND_SIZE is the maximum command size that scsi-ml
 * supports without a cmd_len being specified by the ULD.
 */
#define MAX_COMMAND_SIZE 16
#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
#endif

struct scsi_data_buffer {
	struct sg_table table;
	unsigned length;
};

/* embedded in scsi_cmnd */
struct scsi_pointer {
	char *ptr;		/* data pointer */
	int this_residual;	/* left in this buffer */
	struct scatterlist *buffer;	/* which buffer */
	int buffers_residual;	/* how many buffers left */

	dma_addr_t dma_handle;

	volatile int Status;
	volatile int Message;
	volatile int have_data_in;
	volatile int sent_command;
	volatile int phase;
};

/* for scmd->flags */
#define SCMD_TAGGED		(1 << 0)
#define SCMD_INITIALIZED	(1 << 1)
#define SCMD_LAST		(1 << 2)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS	(SCMD_INITIALIZED)

/* for scmd->state */
#define SCMD_STATE_COMPLETE	0
#define SCMD_STATE_INFLIGHT	1

struct scsi_cmnd {
	struct scsi_request req;
	struct scsi_device *device;
	struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
	struct delayed_work abort_work;

	struct rcu_head rcu;

	int eh_eflags;		/* Used by error handler */

	int budget_token;

	/*
	 * This is set to jiffies as it was when the command was first
	 * allocated. It is used to time how long the command has
	 * been outstanding.
	 */
	unsigned long jiffies_at_alloc;

	int retries;
	int allowed;

	unsigned char prot_op;
	unsigned char prot_type;
	unsigned char prot_flags;

	unsigned short cmd_len;
	enum dma_data_direction sc_data_direction;

	/* These elements define the operation we are about to perform */
	unsigned char *cmnd;


	/* These elements define the operation we ultimately want to perform */
	struct scsi_data_buffer sdb;
	struct scsi_data_buffer *prot_sdb;

	unsigned underflow;	/* Return error if less than
				   this amount is transferred */

	unsigned transfersize;	/* How much we are guaranteed to
				   transfer with each SCSI transfer
				   (ie, between disconnect /
				   reconnects). Probably == sector
				   size */

	unsigned char *sense_buffer;
				/* obtained by REQUEST SENSE when
				 * CHECK CONDITION is received on original
				 * command (auto-sense). Length must be
				 * SCSI_SENSE_BUFFERSIZE bytes. */

	/* Low-level done function - can be used by low-level driver to point
	 * to completion function. Not used by mid/upper level code. */
	void (*scsi_done)(struct scsi_cmnd *);

	/*
	 * The following fields can be written to by the host specific code.
	 * Everything else should be left alone.
	 */
	struct scsi_pointer SCp;	/* Scratchpad used by some host adapters */

	unsigned char *host_scribble;	/* The host adapter is allowed to
					 * call scsi_malloc and get some memory
					 * and hang it here. The host adapter
					 * is also expected to call scsi_free
					 * to release this memory. (The memory
					 * obtained by scsi_malloc is guaranteed
					 * to be at an address < 16Mb). */

	int result;		/* Status code from lower level driver */
	int flags;		/* Command flags */
	unsigned long state;	/* Command completion state */

	unsigned int extra_len;	/* length of alignment and padding */

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};

/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}

/*
 * Return the driver private allocation behind the command.
 * Only works if cmd_size is set in the host template.
 */
static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
{
	return cmd + 1;
}
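
/*
 * Usage sketch (illustrative only; "struct my_cmd_priv" and my_queuecommand()
 * are hypothetical driver names, not part of this API): an LLD reserves
 * per-command space by setting cmd_size in its scsi_host_template, and the
 * midlayer allocates that space directly behind the scsi_cmnd:
 *
 *	struct my_cmd_priv {
 *		u32 hw_tag;
 *	};
 *
 *	static struct scsi_host_template my_sht = {
 *		.cmd_size	= sizeof(struct my_cmd_priv),
 *		...
 *	};
 *
 *	static int my_queuecommand(struct Scsi_Host *shost,
 *				   struct scsi_cmnd *cmd)
 *	{
 *		struct my_cmd_priv *priv = scsi_cmd_priv(cmd);
 *
 *		priv->hw_tag = scsi_cmd_to_rq(cmd)->tag;
 *		...
 *	}
 */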

/* make sure not to use it with passthrough commands */
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	return *(struct scsi_driver **)rq->rq_disk->private_data;
}

extern void scsi_finish_command(struct scsi_cmnd *cmd);

extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
				 size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);

blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
void scsi_free_sgtables(struct scsi_cmnd *cmd);

#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
#else /* !CONFIG_SCSI_DMA */
static inline int scsi_dma_map(struct scsi_cmnd *cmd) { return -ENOSYS; }
static inline void scsi_dma_unmap(struct scsi_cmnd *cmd) { }
#endif /* !CONFIG_SCSI_DMA */

static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
{
	return cmd->sdb.table.nents;
}

static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
{
	return cmd->sdb.table.sgl;
}

static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
{
	return cmd->sdb.length;
}

static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
	cmd->req.resid_len = resid;
}

static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
	return cmd->req.resid_len;
}

#define scsi_for_each_sg(cmd, sg, nseg, __i)			\
	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
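
/*
 * Mapping/iteration sketch (illustrative; my_prepare_sg() and
 * my_add_sg_entry() are hypothetical driver helpers): an LLD typically maps
 * the command's data buffer with scsi_dma_map() and walks the mapped
 * scatterlist with scsi_for_each_sg() before handing the command to its
 * hardware:
 *
 *	static int my_prepare_sg(struct scsi_cmnd *cmd)
 *	{
 *		struct scatterlist *sg;
 *		int i, count;
 *
 *		count = scsi_dma_map(cmd);
 *		if (count <= 0)
 *			return count;
 *
 *		scsi_for_each_sg(cmd, sg, count, i)
 *			my_add_sg_entry(cmd, sg_dma_address(sg),
 *					sg_dma_len(sg));
 *
 *		return count;
 *	}
 *
 * scsi_dma_map() returns the number of mapped segments, 0 for a command
 * without data, or a negative value on mapping failure. A driver that maps
 * with scsi_dma_map() typically calls scsi_dma_unmap() from its completion
 * path.
 */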

static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
					   const void *buf, int buflen)
{
	return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				   buf, buflen);
}

static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
					 void *buf, int buflen)
{
	return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				 buf, buflen);
}

static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
{
	return blk_rq_pos(scsi_cmd_to_rq(scmd));
}

static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
{
	unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;

	return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
}

static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
{
	unsigned int shift = ilog2(scmd->device->sector_size);

	return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
}
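
/*
 * Worked example (assuming a device formatted with 4096-byte logical blocks):
 * scsi_get_lba() shifts the request's 512-byte sector position right by
 * ilog2(4096) - SECTOR_SHIFT = 12 - 9 = 3, so a request starting at sector 80
 * addresses LBA 10; scsi_logical_block_count() converts the request's byte
 * count to logical blocks, so 32768 bytes correspond to 8 blocks.
 */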

/*
 * The operations below are hints that tell the controller driver how
 * to handle I/Os with DIF or similar types of protection information.
 */
enum scsi_prot_operations {
	/* Normal I/O */
	SCSI_PROT_NORMAL = 0,

	/* OS-HBA: Protected, HBA-Target: Unprotected */
	SCSI_PROT_READ_INSERT,
	SCSI_PROT_WRITE_STRIP,

	/* OS-HBA: Unprotected, HBA-Target: Protected */
	SCSI_PROT_READ_STRIP,
	SCSI_PROT_WRITE_INSERT,

	/* OS-HBA: Protected, HBA-Target: Protected */
	SCSI_PROT_READ_PASS,
	SCSI_PROT_WRITE_PASS,
};

static inline void scsi_set_prot_op(struct scsi_cmnd *scmd, unsigned char op)
{
	scmd->prot_op = op;
}

static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
{
	return scmd->prot_op;
}

enum scsi_prot_flags {
	SCSI_PROT_TRANSFER_PI		= 1 << 0,
	SCSI_PROT_GUARD_CHECK		= 1 << 1,
	SCSI_PROT_REF_CHECK		= 1 << 2,
	SCSI_PROT_REF_INCREMENT		= 1 << 3,
	SCSI_PROT_IP_CHECKSUM		= 1 << 4,
};

/*
 * The controller usually does not know anything about the target it
 * is communicating with. However, when DIX is enabled the controller
 * must know the target type so it can verify the protection
 * information passed along with the I/O.
 */
enum scsi_prot_target_type {
	SCSI_PROT_DIF_TYPE0 = 0,
	SCSI_PROT_DIF_TYPE1,
	SCSI_PROT_DIF_TYPE2,
	SCSI_PROT_DIF_TYPE3,
};

static inline void scsi_set_prot_type(struct scsi_cmnd *scmd, unsigned char type)
{
	scmd->prot_type = type;
}

static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
{
	return scmd->prot_type;
}

static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
	struct request *rq = blk_mq_rq_from_pdu(scmd);

	return t10_pi_ref_tag(rq);
}

static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
{
	return scmd->device->sector_size;
}

static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
	return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
}

static inline struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd)
{
	return cmd->prot_sdb ? cmd->prot_sdb->table.sgl : NULL;
}

static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
{
	return cmd->prot_sdb;
}

#define scsi_for_each_prot_sg(cmd, sg, nseg, __i)		\
	for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
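
/*
 * Protection-operation sketch (illustrative; my_queue_pi(), my_setup_dif()
 * and my_setup_dix() are hypothetical driver helpers): a DIX-capable LLD
 * usually inspects scsi_get_prot_op() in its queuecommand path and programs
 * its hardware accordingly:
 *
 *	static void my_queue_pi(struct scsi_cmnd *cmd)
 *	{
 *		switch (scsi_get_prot_op(cmd)) {
 *		case SCSI_PROT_NORMAL:
 *			break;
 *		case SCSI_PROT_READ_STRIP:
 *		case SCSI_PROT_WRITE_INSERT:
 *			my_setup_dif(cmd);
 *			break;
 *		default:
 *			my_setup_dix(cmd, scsi_prot_sglist(cmd),
 *				     scsi_prot_sg_count(cmd));
 *			break;
 *		}
 *	}
 *
 * For READ_STRIP/WRITE_INSERT the protection information exists only on the
 * wire; for the remaining protected operations the PI buffer described by
 * scsi_prot_sglist() is transferred to or from host memory as well.
 */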

static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0xffffff00) | status;
}

static inline u8 get_status_byte(struct scsi_cmnd *cmd)
{
	return cmd->result & 0xff;
}

static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}

static inline u8 get_host_byte(struct scsi_cmnd *cmd)
{
	return (cmd->result >> 16) & 0xff;
}
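
/*
 * Illustrative note: cmd->result packs the SAM status byte in bits 0-7 and
 * the host byte in bits 16-23, so an LLD completing a command that returned
 * CHECK CONDITION with no transport-level error might do:
 *
 *	set_host_byte(cmd, DID_OK);
 *	set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
 *	cmd->scsi_done(cmd);
 */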

/**
 * scsi_msg_to_host_byte() - translate message byte
 * @cmd: the SCSI command
 * @msg: the SCSI parallel message byte to translate
 *
 * Translate the SCSI parallel message byte to a matching
 * host byte setting. A message of COMMAND_COMPLETE indicates
 * a successful command execution; any other message indicates
 * an error. As the messages themselves only have a meaning
 * for the SCSI parallel protocol, this function translates
 * them into a matching host byte value for SCSI EH.
 */
static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg)
{
	switch (msg) {
	case COMMAND_COMPLETE:
		break;
	case ABORT_TASK_SET:
		set_host_byte(cmd, DID_ABORT);
		break;
	case TARGET_RESET:
		set_host_byte(cmd, DID_RESET);
		break;
	default:
		set_host_byte(cmd, DID_ERROR);
		break;
	}
}

static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
{
	unsigned int xfer_len = scmd->sdb.length;
	unsigned int prot_interval = scsi_prot_interval(scmd);

	if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
		xfer_len += (xfer_len >> ilog2(prot_interval)) * 8;

	return xfer_len;
}
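
/*
 * Worked example: with SCSI_PROT_TRANSFER_PI set, a 512-byte protection
 * interval and a 4096-byte data transfer, the command moves 4096 / 512 = 8
 * protection intervals with one 8-byte tuple each, so scsi_transfer_length()
 * returns 4096 + 8 * 8 = 4160 bytes.
 */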

extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
			     u8 key, u8 asc, u8 ascq);

#endif /* _SCSI_SCSI_CMND_H */