/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#ifndef RNBD_PROTO_H
#define RNBD_PROTO_H

#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/limits.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib.h>

#define RNBD_PROTO_VER_MAJOR 2
#define RNBD_PROTO_VER_MINOR 0

/* The default port number the RTRS server is listening on. */
#define RTRS_PORT 1234

/**
 * enum rnbd_msg_type - RNBD message types
 * @RNBD_MSG_SESS_INFO:		initial session info from client to server
 * @RNBD_MSG_SESS_INFO_RSP:	initial session info from server to client
 * @RNBD_MSG_OPEN:		open (map) device request
 * @RNBD_MSG_OPEN_RSP:		response to an @RNBD_MSG_OPEN
 * @RNBD_MSG_IO:		block IO request operation
 * @RNBD_MSG_CLOSE:		close (unmap) device request
 */
enum rnbd_msg_type {
	RNBD_MSG_SESS_INFO,
	RNBD_MSG_SESS_INFO_RSP,
	RNBD_MSG_OPEN,
	RNBD_MSG_OPEN_RSP,
	RNBD_MSG_IO,
	RNBD_MSG_CLOSE,
};

/**
 * struct rnbd_msg_hdr - header of RNBD messages
 * @type:	message type, valid values see: enum rnbd_msg_type
 * @__padding:	unused, pads the header to 4 bytes
 */
struct rnbd_msg_hdr {
	__le16		type;
	__le16		__padding;
};
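
/*
 * All multi-byte wire fields are little-endian; callers convert with
 * cpu_to_le*()/le*_to_cpu(). A minimal, illustrative sketch (hypothetical
 * local variable "hdr", not the driver's actual code):
 *
 *	struct rnbd_msg_hdr hdr = {
 *		.type = cpu_to_le16(RNBD_MSG_OPEN),
 *	};
 */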

/*
 * A device can be mapped read-only (RO) any number of times, but read-write
 * (RW) only once. A second RW mapping is allowed only when
 * RNBD_ACCESS_MIGRATION is requested (a second RW export can be required,
 * for example, for VM migration).
 */
enum rnbd_access_mode {
	RNBD_ACCESS_RO,
	RNBD_ACCESS_RW,
	RNBD_ACCESS_MIGRATION,
};

/**
 * struct rnbd_msg_sess_info - initial session info from client to server
 * @hdr:	message header
 * @ver:	RNBD protocol version
 */
struct rnbd_msg_sess_info {
	struct rnbd_msg_hdr hdr;
	u8		ver;
	u8		reserved[31];
};

/**
 * struct rnbd_msg_sess_info_rsp - initial session info from server to client
 * @hdr:	message header
 * @ver:	RNBD protocol version
 */
struct rnbd_msg_sess_info_rsp {
	struct rnbd_msg_hdr hdr;
	u8		ver;
	u8		reserved[31];
};

/**
 * struct rnbd_msg_open - request to open a remote device.
 * @hdr:		message header
 * @access_mode:	access mode for the remote device, valid values see:
 *			enum rnbd_access_mode
 * @dev_name:		device path on remote side
 */
struct rnbd_msg_open {
	struct rnbd_msg_hdr hdr;
	u8		access_mode;
	u8		resv1;
	s8		dev_name[NAME_MAX];
	u8		reserved[3];
};
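
/*
 * Illustrative sketch of composing an open request on the client side
 * (hypothetical variables, error handling omitted; not the driver's
 * actual code):
 *
 *	struct rnbd_msg_open msg = {
 *		.hdr.type    = cpu_to_le16(RNBD_MSG_OPEN),
 *		.access_mode = RNBD_ACCESS_RO,
 *	};
 *
 *	strscpy(msg.dev_name, "ram0", sizeof(msg.dev_name));
 */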

/**
 * struct rnbd_msg_close - request to close a remote device.
 * @hdr:	message header
 * @device_id:	device_id on server side to identify the device
 */
struct rnbd_msg_close {
	struct rnbd_msg_hdr hdr;
	__le32		device_id;
};

enum rnbd_cache_policy {
	RNBD_FUA	= 1 << 0,
	RNBD_WRITEBACK	= 1 << 1,
};

/**
 * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
 * @hdr:		message header
 * @device_id:		device_id on server side to identify the device
 * @nsectors:		number of sectors in the usual 512b unit
 * @max_hw_sectors:	max hardware sectors in the usual 512b unit
 * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
 * @max_discard_sectors: max sectors that can be discarded at once, in the
 *			512b unit
 * @discard_granularity: size of the internal discard allocation unit in bytes
 * @discard_alignment:	offset from internal allocation assignment in bytes
 * @physical_block_size: physical block size the device supports in bytes
 * @logical_block_size: logical block size the device supports in bytes
 * @max_segments:	max segments the hardware supports in one transfer
 * @secure_discard:	supports secure discard
 * @obsolete_rotational: obsolete, not in use
 * @cache_policy:	bitmask of enum rnbd_cache_policy bits: write-back
 *			caching and/or FUA support
 */
struct rnbd_msg_open_rsp {
	struct rnbd_msg_hdr	hdr;
	__le32			device_id;
	__le64			nsectors;
	__le32			max_hw_sectors;
	__le32			max_write_same_sectors;
	__le32			max_discard_sectors;
	__le32			discard_granularity;
	__le32			discard_alignment;
	__le16			physical_block_size;
	__le16			logical_block_size;
	__le16			max_segments;
	__le16			secure_discard;
	u8			obsolete_rotational;
	u8			cache_policy;
	u8			reserved[10];
};
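
/*
 * Illustrative sketch of a client consuming the open response (hypothetical
 * "rsp" pointer): @cache_policy is a plain byte, so no endian conversion,
 * while the wider fields need le*_to_cpu().
 *
 *	sector_t nsectors = le64_to_cpu(rsp->nsectors);
 *	bool wc  = rsp->cache_policy & RNBD_WRITEBACK;
 *	bool fua = rsp->cache_policy & RNBD_FUA;
 */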

/**
 * struct rnbd_msg_io - message for I/O read/write
 * @hdr:	message header
 * @device_id:	device_id on server side to find the right device
 * @sector:	bi_sector attribute from struct bio
 * @rw:		valid values are defined in enum rnbd_io_flags
 * @bi_size:	number of bytes for I/O read/write
 * @prio:	priority
 */
struct rnbd_msg_io {
	struct rnbd_msg_hdr hdr;
	__le32		device_id;
	__le64		sector;
	__le32		rw;
	__le32		bi_size;
	__le16		prio;
};
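
/*
 * Illustrative sketch of filling an I/O message from a struct request,
 * using rq_to_rnbd_flags() below (hypothetical variables "msg", "rq" and
 * "dev_id"):
 *
 *	msg.hdr.type  = cpu_to_le16(RNBD_MSG_IO);
 *	msg.device_id = cpu_to_le32(dev_id);
 *	msg.sector    = cpu_to_le64(blk_rq_pos(rq));
 *	msg.rw        = cpu_to_le32(rq_to_rnbd_flags(rq));
 *	msg.bi_size   = cpu_to_le32(blk_rq_bytes(rq));
 *	msg.prio      = cpu_to_le16(req_get_ioprio(rq));
 */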

#define RNBD_OP_BITS  8
#define RNBD_OP_MASK  ((1 << RNBD_OP_BITS) - 1)

/**
 * enum rnbd_io_flags - RNBD request types from rq_flag_bits
 * @RNBD_OP_READ:		read sectors from the device
 * @RNBD_OP_WRITE:		write sectors to the device
 * @RNBD_OP_FLUSH:		flush the volatile write cache
 * @RNBD_OP_DISCARD:		discard sectors
 * @RNBD_OP_SECURE_ERASE:	securely erase sectors
 * @RNBD_OP_WRITE_SAME:		write the same sectors many times
 *
 * @RNBD_F_SYNC:		request is sync (sync write or read)
 * @RNBD_F_FUA:			forced unit access
 */
enum rnbd_io_flags {

	/* Operations */
	RNBD_OP_READ		= 0,
	RNBD_OP_WRITE		= 1,
	RNBD_OP_FLUSH		= 2,
	RNBD_OP_DISCARD		= 3,
	RNBD_OP_SECURE_ERASE	= 4,
	RNBD_OP_WRITE_SAME	= 5,

	RNBD_OP_LAST,

	/* Flags */
	RNBD_F_SYNC	= 1 << (RNBD_OP_BITS + 0),
	RNBD_F_FUA	= 1 << (RNBD_OP_BITS + 1),

	RNBD_F_ALL	= (RNBD_F_SYNC | RNBD_F_FUA)
};
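
/*
 * The low RNBD_OP_BITS bits of the wire value carry the operation, the bits
 * above carry the flags: e.g. a sync FUA write encodes as
 * RNBD_OP_WRITE | RNBD_F_SYNC | RNBD_F_FUA == 0x1 | 0x100 | 0x200 == 0x301.
 */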

/* Extract the operation from a wire rw/flags value. */
static inline u32 rnbd_op(u32 flags)
{
	return flags & RNBD_OP_MASK;
}

/* Extract the flag bits, i.e. everything above the operation. */
static inline u32 rnbd_flags(u32 flags)
{
	return flags & ~RNBD_OP_MASK;
}

/* Check that both the operation and the flag bits are known. */
static inline bool rnbd_flags_supported(u32 flags)
{
	u32 op;

	op = rnbd_op(flags);
	flags = rnbd_flags(flags);

	if (op >= RNBD_OP_LAST)
		return false;
	if (flags & ~RNBD_F_ALL)
		return false;

	return true;
}
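
/*
 * Illustrative sketch of server-side validation of an incoming I/O message
 * (hypothetical "msg" pointer):
 *
 *	u32 rw = le32_to_cpu(msg->rw);
 *
 *	if (!rnbd_flags_supported(rw))
 *		return -EINVAL;
 */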

static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
	blk_opf_t bio_opf;

	switch (rnbd_op(rnbd_opf)) {
	case RNBD_OP_READ:
		bio_opf = REQ_OP_READ;
		break;
	case RNBD_OP_WRITE:
		bio_opf = REQ_OP_WRITE;
		break;
	case RNBD_OP_FLUSH:
		bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
		break;
	case RNBD_OP_DISCARD:
		bio_opf = REQ_OP_DISCARD;
		break;
	case RNBD_OP_SECURE_ERASE:
		bio_opf = REQ_OP_SECURE_ERASE;
		break;
	default:
		WARN(1, "Unknown RNBD type: %d (flags %d)\n",
		     rnbd_op(rnbd_opf), rnbd_opf);
		bio_opf = 0;
	}

	if (rnbd_opf & RNBD_F_SYNC)
		bio_opf |= REQ_SYNC;

	if (rnbd_opf & RNBD_F_FUA)
		bio_opf |= REQ_FUA;

	return bio_opf;
}

static inline u32 rq_to_rnbd_flags(struct request *rq)
{
	u32 rnbd_opf;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		rnbd_opf = RNBD_OP_READ;
		break;
	case REQ_OP_WRITE:
		rnbd_opf = RNBD_OP_WRITE;
		break;
	case REQ_OP_DISCARD:
		rnbd_opf = RNBD_OP_DISCARD;
		break;
	case REQ_OP_SECURE_ERASE:
		rnbd_opf = RNBD_OP_SECURE_ERASE;
		break;
	case REQ_OP_FLUSH:
		rnbd_opf = RNBD_OP_FLUSH;
		break;
	default:
		WARN(1, "Unknown request type %d (flags %llu)\n",
		     (__force u32)req_op(rq),
		     (__force unsigned long long)rq->cmd_flags);
		rnbd_opf = 0;
	}

	if (op_is_sync(rq->cmd_flags))
		rnbd_opf |= RNBD_F_SYNC;

	if (op_is_flush(rq->cmd_flags))
		rnbd_opf |= RNBD_F_FUA;

	return rnbd_opf;
}
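
/*
 * rq_to_rnbd_flags() (client side) and rnbd_to_bio_flags() (server side) are
 * the two ends of the wire encoding: e.g. a plain write request maps to
 * RNBD_OP_WRITE above and is turned back into REQ_OP_WRITE on the server.
 */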

const char *rnbd_access_mode_str(enum rnbd_access_mode mode);

#endif /* RNBD_PROTO_H */