#ifndef __TARGET_CORE_USER_H
#define __TARGET_CORE_USER_H

/* This header will be used by applications too */

#include <linux/types.h>
#include <linux/uio.h>

#define TCMU_VERSION "2.0"

/*
 * Ring Design
 * -----------
 *
 * The mmap'ed area is divided into three parts:
 * 1) The mailbox (struct tcmu_mailbox, below)
 * 2) The command ring
 * 3) Everything beyond the command ring (data)
 *
 * The mailbox tells userspace the offset of the command ring from the
 * start of the shared memory region, and how big the command ring is.
 *
 * The kernel passes SCSI commands to userspace by putting a struct
 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
 * userspace via uio's interrupt mechanism.
 *
 * tcmu_cmd_entry contains a header. If the header type is PAD,
 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
 * next cmd_entry.
 *
 * Otherwise, the entry will contain offsets into the mmap'ed area that
 * locate the cdb and data buffers -- the latter accessible via the
 * iov array. iov addresses are also offsets into the shared area.
 *
 * When userspace has finished handling the command, it should set
 * entry->rsp.scsi_status, fill in rsp.sense_buffer if appropriate,
 * and set mailbox->cmd_tail equal to the old cmd_tail plus
 * hdr->length, mod cmdr_size. If cmd_tail still doesn't equal cmd_head,
 * it should process the next entry the same way, and so on.
 */
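
/*
 * Example (illustrative sketch, not part of this header): mapping the
 * shared region from userspace. It assumes the device appears as uio
 * device 0 and that the standard uio sysfs layout is available; the
 * device index and all error handling are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static struct tcmu_mailbox *tcmu_map(size_t *len_out, int *fd_out)
 *	{
 *		FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
 *		size_t len;
 *
 *		fscanf(f, "0x%zx", &len);	// region size exported by uio
 *		fclose(f);
 *
 *		*fd_out = open("/dev/uio0", O_RDWR);
 *		*len_out = len;
 *
 *		// The mailbox sits at the start of the mapping; the command
 *		// ring follows at cmdr_off and the data area after that.
 *		return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			    *fd_out, 0);
 *	}
 *
 * A caller would check that mb->version == TCMU_MAILBOX_VERSION before
 * touching the ring.
 */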

#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */

struct tcmu_mailbox {
	__u16 version;
	__u16 flags;
	__u32 cmdr_off;
	__u32 cmdr_size;

	/* Updated by kernel */
	__u32 cmd_head;

	/* Updated by user. On its own cacheline */
	__u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));

} __packed;
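
/*
 * Example (illustrative sketch): the consumption loop described in the
 * Ring Design comment, using the head/tail fields above and the len_op
 * helpers below. handle_cmd() is a hypothetical per-command handler;
 * real code also needs acquire/release memory barriers around the
 * cmd_head load and the cmd_tail store, which are omitted here.
 *
 *	static void tcmu_process_ring(struct tcmu_mailbox *mb)
 *	{
 *		char *cmdr = (char *)mb + mb->cmdr_off;
 *		__u32 tail = mb->cmd_tail;
 *
 *		while (tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent =
 *				(struct tcmu_cmd_entry *)(cmdr + tail);
 *
 *			// PAD entries carry no command and are just skipped
 *			if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *				handle_cmd(mb, ent);
 *
 *			tail = (tail + tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *			mb->cmd_tail = tail;
 *		}
 *	}
 */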

enum tcmu_opcode {
	TCMU_OP_PAD = 0,
	TCMU_OP_CMD,
};

/*
 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
 */
struct tcmu_cmd_entry_hdr {
	__u32 len_op;
	__u16 cmd_id;
	__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
	__u8 uflags;

} __packed;

#define TCMU_OP_MASK 0x7

static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
	return len_op & TCMU_OP_MASK;
}

static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
	*len_op &= ~TCMU_OP_MASK;
	*len_op |= (op & TCMU_OP_MASK);
}

static inline __u32 tcmu_hdr_get_len(__u32 len_op)
{
	return len_op & ~TCMU_OP_MASK;
}

static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
	*len_op &= TCMU_OP_MASK;
	*len_op |= len;
}

/* Currently the same as SCSI_SENSE_BUFFERSIZE */
#define TCMU_SENSE_BUFFERSIZE 96

struct tcmu_cmd_entry {
	struct tcmu_cmd_entry_hdr hdr;

	union {
		struct {
			uint32_t iov_cnt;
			uint32_t iov_bidi_cnt;
			uint32_t iov_dif_cnt;
			uint64_t cdb_off;
			uint64_t __pad1;
			uint64_t __pad2;
			struct iovec iov[0];
		} req;
		struct {
			uint8_t scsi_status;
			uint8_t __pad1;
			uint16_t __pad2;
			uint32_t __pad3;
			char sense_buffer[TCMU_SENSE_BUFFERSIZE];
		} rsp;
	};

} __packed;
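
/*
 * Example (illustrative sketch): completing one TCMU_OP_CMD entry as
 * described in the Ring Design comment. 'base' is the start of the
 * mmap'ed region and 'fd' the open uio file descriptor; the 4-byte
 * doorbell write and the helper name are assumptions, not defined by
 * this header. Note that req and rsp overlay each other, so request
 * fields must be read before the response is written.
 *
 *	static void tcmu_complete(void *base, int fd,
 *				  struct tcmu_cmd_entry *ent,
 *				  uint8_t status, const char *sense)
 *	{
 *		// cdb_off and iov_base are offsets into the shared region,
 *		// not pointers.
 *		uint8_t *cdb = (uint8_t *)base + ent->req.cdb_off;
 *		void *data = ent->req.iov_cnt ?
 *			(char *)base + (size_t)ent->req.iov[0].iov_base : NULL;
 *
 *		// ... dispatch on cdb[0], read or fill 'data' ...
 *
 *		ent->rsp.scsi_status = status;
 *		if (sense)
 *			memcpy(ent->rsp.sense_buffer, sense,
 *			       TCMU_SENSE_BUFFERSIZE);
 *
 *		// After mailbox->cmd_tail has been advanced (see the loop
 *		// sketch above), wake the kernel through the uio fd:
 *		uint32_t val = 0;
 *		write(fd, &val, 4);
 *	}
 */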

#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t)

enum tcmu_genl_cmd {
	TCMU_CMD_UNSPEC,
	TCMU_CMD_ADDED_DEVICE,
	TCMU_CMD_REMOVED_DEVICE,
	__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)

enum tcmu_genl_attr {
	TCMU_ATTR_UNSPEC,
	TCMU_ATTR_DEVICE,
	TCMU_ATTR_MINOR,
	__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
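
/*
 * Example (illustrative sketch): listening for the notifications above
 * over generic netlink with libnl-3. The family name "TCM-USER" and the
 * multicast group "config" come from the TCMU kernel module and are
 * assumptions here, not defined by this header; error handling omitted.
 *
 *	#include <stdio.h>
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	static int handle_nl(struct nl_msg *msg, void *arg)
 *	{
 *		struct nlattr *attrs[TCMU_ATTR_MAX + 1];
 *		struct genlmsghdr *gh = nlmsg_data(nlmsg_hdr(msg));
 *
 *		genlmsg_parse(nlmsg_hdr(msg), 0, attrs, TCMU_ATTR_MAX, NULL);
 *
 *		if (gh->cmd == TCMU_CMD_ADDED_DEVICE && attrs[TCMU_ATTR_MINOR])
 *			printf("new tcmu device, uio minor %u\n",
 *			       nla_get_u32(attrs[TCMU_ATTR_MINOR]));
 *		return NL_OK;
 *	}
 *
 *	static void tcmu_listen(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *
 *		genl_connect(sk);
 *		nl_socket_disable_seq_check(sk);
 *		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 *				    handle_nl, NULL);
 *		nl_socket_add_membership(sk,
 *			genl_ctrl_resolve_grp(sk, "TCM-USER", "config"));
 *		while (1)
 *			nl_recvmsgs_default(sk);
 *	}
 */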

#endif