// SPDX-License-Identifier: MIT or LGPL-2.1-only

#include <config.h>

#include "ublksrv_priv.h"

#ifdef UBLK_CONTROL
#define CTRL_DEV	UBLK_CONTROL
#else
#define CTRL_DEV	"/dev/ublk-control"
#endif

#define CTRL_CMD_HAS_DATA	1
#define CTRL_CMD_HAS_BUF	2
#define CTRL_CMD_NO_TRANS	4

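/*
 * Per-command parameters used to build one control uring_cmd: @cmd_op is
 * the control opcode, @flags selects which of the fields below are copied
 * into the SQE payload, @addr/@len describe the command buffer, @data[0]
 * carries inline data (e.g. the daemon pid), and @dev_path_len is the
 * length of the /dev/ublkcN path prepended to the buffer for unprivileged
 * devices.
 */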
struct ublksrv_ctrl_cmd_data {
	unsigned int cmd_op;
	unsigned short flags;
	unsigned short _pad;

	__u64 data[1];
	__u16 dev_path_len;
	__u16 pad;
	__u32 reserved;

	__u64 addr;
	__u32 len;
};

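/*
 * For unprivileged devices the control command has to carry the
 * /dev/ublkcN path so the driver can check the caller's permission on the
 * char device.  Deliberately not wrapped in do { } while (0): 'buf' must
 * stay in scope until __ublksrv_ctrl_cmd() has submitted the command.
 */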
#define ublk_un_privileged_prep_data(dev, data) \
	char buf[UBLKC_PATH_MAX]; \
	if (ublk_is_unprivileged(dev)) { \
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV, \
				dev->dev_info.dev_id); \
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA; \
		data.len = sizeof(buf); \
		data.dev_path_len = UBLKC_PATH_MAX; \
		data.addr = (__u64)buf; \
	}

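/* Map legacy control opcodes to their ioctl-encoded equivalents */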
static const unsigned int ctrl_cmd_op[] = {
	[UBLK_CMD_GET_QUEUE_AFFINITY]	= UBLK_U_CMD_GET_QUEUE_AFFINITY,
	[UBLK_CMD_GET_DEV_INFO]		= UBLK_U_CMD_GET_DEV_INFO,
	[UBLK_CMD_ADD_DEV]		= UBLK_U_CMD_ADD_DEV,
	[UBLK_CMD_DEL_DEV]		= UBLK_U_CMD_DEL_DEV,
	[UBLK_CMD_START_DEV]		= UBLK_U_CMD_START_DEV,
	[UBLK_CMD_STOP_DEV]		= UBLK_U_CMD_STOP_DEV,
	[UBLK_CMD_SET_PARAMS]		= UBLK_U_CMD_SET_PARAMS,
	[UBLK_CMD_GET_PARAMS]		= UBLK_U_CMD_GET_PARAMS,
	[UBLK_CMD_START_USER_RECOVERY]	= UBLK_U_CMD_START_USER_RECOVERY,
	[UBLK_CMD_END_USER_RECOVERY]	= UBLK_U_CMD_END_USER_RECOVERY,
	[UBLK_CMD_GET_DEV_INFO2]	= UBLK_U_CMD_GET_DEV_INFO2,
};

static unsigned int legacy_op_to_ioctl(unsigned int op)
{
	assert(_IOC_TYPE(op) == 0);
	assert(_IOC_DIR(op) == 0);
	assert(_IOC_SIZE(op) == 0);
	assert(op >= UBLK_CMD_GET_QUEUE_AFFINITY &&
			op <= UBLK_CMD_GET_DEV_INFO2);

	return ctrl_cmd_op[op];
}


/******************* ctrl dev operations ********************************/
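/*
 * Fill one 128-byte SQE with an IORING_OP_URING_CMD aimed at
 * /dev/ublk-control; the ublksrv_ctrl_cmd payload lives in the SQE's
 * command area.
 */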
static inline void ublksrv_ctrl_init_cmd(struct ublksrv_ctrl_dev *dev,
		struct io_uring_sqe *sqe,
		struct ublksrv_ctrl_cmd_data *data)
{
	struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	struct ublksrv_ctrl_cmd *cmd = (struct ublksrv_ctrl_cmd *)ublksrv_get_sqe_cmd(sqe);
	unsigned int cmd_op = data->cmd_op;

	sqe->fd = dev->ctrl_fd;
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->ioprio = 0;

	if (data->flags & CTRL_CMD_HAS_BUF) {
		cmd->addr = data->addr;
		cmd->len = data->len;
	}

	if (data->flags & CTRL_CMD_HAS_DATA) {
		cmd->data[0] = data->data[0];
		cmd->dev_path_len = data->dev_path_len;
	}

	cmd->dev_id = info->dev_id;
	cmd->queue_id = -1;

	if (!(data->flags & CTRL_CMD_NO_TRANS) &&
			(info->flags & UBLK_F_CMD_IOCTL_ENCODE))
		cmd_op = legacy_op_to_ioctl(cmd_op);
	ublksrv_set_sqe_cmd_op(sqe, cmd_op);

	io_uring_sqe_set_data(sqe, cmd);

	ublk_ctrl_dbg(UBLK_DBG_CTRL_CMD, "dev %d cmd_op %x/%x, user_data %p\n",
			dev->dev_info.dev_id, data->cmd_op, cmd_op, cmd);
}

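/*
 * Issue a single control command synchronously: queue one SQE, submit it
 * and wait for its CQE.  Returns cqe->res, i.e. 0 or a positive value on
 * success and a negative errno on failure.
 */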
static int __ublksrv_ctrl_cmd(struct ublksrv_ctrl_dev *dev,
		struct ublksrv_ctrl_cmd_data *data)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret = -EINVAL;

	sqe = io_uring_get_sqe(&dev->ring);
	if (!sqe) {
		fprintf(stderr, "can't get sqe, ret %d\n", ret);
		return ret;
	}

	ublksrv_ctrl_init_cmd(dev, sqe, data);

	ret = io_uring_submit(&dev->ring);
	if (ret < 0) {
		fprintf(stderr, "uring submit ret %d\n", ret);
		return ret;
	}

	do {
		ret = io_uring_wait_cqe(&dev->ring, &cqe);
	} while (ret == -EINTR);
	if (ret < 0) {
		fprintf(stderr, "wait cqe: %s\n", strerror(-ret));
		return ret;
	}

	ublk_ctrl_dbg(UBLK_DBG_CTRL_CMD, "dev %d, ctrl cqe res %d, user_data %llx\n",
			dev->dev_info.dev_id, cqe->res, cqe->user_data);

	/* read cqe->res before marking the CQE as seen */
	ret = cqe->res;
	io_uring_cqe_seen(&dev->ring, cqe);

	return ret;
}

void ublksrv_ctrl_deinit(struct ublksrv_ctrl_dev *dev)
{
	close(dev->ring.ring_fd);
	close(dev->ctrl_fd);
	free(dev->queues_cpuset);
	free(dev);
}

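/*
 * Allocate the control-device handle: open /dev/ublk-control, copy the
 * caller-supplied device description, and set up a small SQE128 io_uring
 * for issuing control commands.
 */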
struct ublksrv_ctrl_dev *ublksrv_ctrl_init(struct ublksrv_dev_data *data)
{
	struct io_uring_params p;
	struct ublksrv_ctrl_dev *dev = (struct ublksrv_ctrl_dev *)calloc(1,
			sizeof(*dev));
	struct ublksrv_ctrl_dev_info *info;
	int ret;

	if (!dev)
		return NULL;
	info = &dev->dev_info;

	dev->ctrl_fd = open(CTRL_DEV, O_RDWR);
	if (dev->ctrl_fd < 0) {
		fprintf(stderr, "control dev %s can't be opened: %m\n", CTRL_DEV);
		exit(dev->ctrl_fd);
	}

	/* -1 asks the ublk driver to allocate a free device id for us */
	info->dev_id = data->dev_id;
	info->nr_hw_queues = data->nr_hw_queues;
	info->queue_depth = data->queue_depth;
	info->max_io_buf_bytes = data->max_io_buf_bytes;
	info->flags = data->flags;
	info->ublksrv_flags = data->ublksrv_flags;

	dev->run_dir = data->run_dir;
	dev->tgt_type = data->tgt_type;
	dev->tgt_ops = data->tgt_ops;
	dev->tgt_argc = data->tgt_argc;
	dev->tgt_argv = data->tgt_argv;

	/* 32 entries are more than enough for sending ctrl commands */
	ublksrv_setup_ring_params(&p, 32, IORING_SETUP_SQE128);
	ret = io_uring_queue_init_params(32, &dev->ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		close(dev->ctrl_fd);
		free(dev);
		return NULL;
	}

	return dev;
}

/* queues_cpuset is only used for setting up the queue pthread daemons */
int ublksrv_ctrl_get_affinity(struct ublksrv_ctrl_dev *ctrl_dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_GET_QUEUE_AFFINITY,
		.flags	= CTRL_CMD_HAS_DATA | CTRL_CMD_HAS_BUF,
	};
	unsigned char *buf;
	int i, ret;
	int len;
	int path_len;

	if (ublk_is_unprivileged(ctrl_dev))
		path_len = UBLKC_PATH_MAX;
	else
		path_len = 0;

	len = (sizeof(cpu_set_t) + path_len) * ctrl_dev->dev_info.nr_hw_queues;
	buf = malloc(len);
	if (!buf)
		return -ENOMEM;

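	/*
	 * For unprivileged devices each per-queue slot holds the ublkc
	 * path followed by the cpu_set_t written back by the driver; for
	 * privileged devices the buffer is a plain array of cpu_set_t.
	 */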
	for (i = 0; i < ctrl_dev->dev_info.nr_hw_queues; i++) {
		data.data[0] = i;
		data.dev_path_len = path_len;
		data.len = sizeof(cpu_set_t) + path_len;
		data.addr = (__u64)&buf[i * data.len];

		if (path_len)
			snprintf((char *)data.addr, UBLKC_PATH_MAX, "%s%d",
					UBLKC_DEV, ctrl_dev->dev_info.dev_id);

		ret = __ublksrv_ctrl_cmd(ctrl_dev, &data);
		if (ret < 0) {
			free(buf);
			return ret;
		}
	}
	ctrl_dev->queues_cpuset = (cpu_set_t *)buf;

	return 0;
}

/*
 * Start the ublksrv device:
 *
 * 1) fork a daemon for handling IO commands from the driver
 *
 * 2) wait for the device to become ready: the daemon should submit
 * sqes to /dev/ublkcN, just like usb's urb usage, one sqe per request.
 * When an IO request arrives at the kernel driver of /dev/ublkbN,
 * the sqe for this request is completed and the daemon gets notified.
 * Once every io request of the driver has its own sqe queued,
 * /dev/ublkbN is considered ready to start.
 *
 * 3) in the current process context, send the START_DEV command to
 * /dev/ublk-control with the device id, which causes the ublk driver to
 * expose /dev/ublkbN
 */
int ublksrv_ctrl_start_dev(struct ublksrv_ctrl_dev *ctrl_dev,
		int daemon_pid)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_START_DEV,
		.flags	= CTRL_CMD_HAS_DATA,
	};
	int ret;

	ublk_un_privileged_prep_data(ctrl_dev, data);

	ctrl_dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid;

	ret = __ublksrv_ctrl_cmd(ctrl_dev, &data);

	return ret;
}

/*
 * Stop the ublksrv device:
 *
 * 1) send the STOP_DEV command to /dev/ublk-control with the device id
 *
 * 2) the ublk driver gets this command, freezes /dev/ublkbN, then completes
 * all pending sqes, meanwhile telling the daemon via cqe->res not to submit
 * any more sqes since we are being closed.  It also deletes /dev/ublkbN.
 *
 * 3) the ublk daemon figures out that all sqes are completed and freed,
 * then closes /dev/ublkcN and exits.
 */
static int __ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev *dev, unsigned cmd_op)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= cmd_op,
		.flags	= CTRL_CMD_HAS_BUF | CTRL_CMD_NO_TRANS,
		.addr = (__u64)&dev->dev_info,
		.len = sizeof(struct ublksrv_ctrl_dev_info),
	};

	return __ublksrv_ctrl_cmd(dev, &data);
}

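/*
 * Create the device: try the ioctl-encoded ADD_DEV first, then fall back
 * to the legacy opcode for kernels without UBLK_F_CMD_IOCTL_ENCODE.
 */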
int ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev *dev)
{
	int ret = __ublksrv_ctrl_add_dev(dev, UBLK_U_CMD_ADD_DEV);

	if (ret < 0)
		return __ublksrv_ctrl_add_dev(dev, UBLK_CMD_ADD_DEV);

	return ret;
}

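/*
 * Delete the device without waiting for the removal to complete;
 * UBLK_U_CMD_DEL_DEV_ASYNC is already ioctl encoded, so no opcode
 * translation is needed (CTRL_CMD_NO_TRANS).
 */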
int ublksrv_ctrl_del_dev_async(struct ublksrv_ctrl_dev *dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op = UBLK_U_CMD_DEL_DEV_ASYNC,
		.flags = CTRL_CMD_NO_TRANS,
	};

	ublk_un_privileged_prep_data(dev, data);

	return __ublksrv_ctrl_cmd(dev, &data);
}

int ublksrv_ctrl_del_dev(struct ublksrv_ctrl_dev *dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op = UBLK_CMD_DEL_DEV,
		.flags = 0,
	};

	ublk_un_privileged_prep_data(dev, data);

	return __ublksrv_ctrl_cmd(dev, &data);
}

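/*
 * Issue GET_DEV_INFO/GET_DEV_INFO2 with the opcode passed through
 * untranslated; for GET_DEV_INFO2 the command buffer is the ublkc path
 * followed by the returned ublksrv_ctrl_dev_info.
 */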
static int __ublksrv_ctrl_get_info_no_trans(struct ublksrv_ctrl_dev *dev,
		unsigned cmd_op)
{
	char buf[UBLKC_PATH_MAX + sizeof(dev->dev_info)];
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= cmd_op,
		.flags	= CTRL_CMD_HAS_BUF | CTRL_CMD_NO_TRANS,
		.addr = (__u64)&dev->dev_info,
		.len = sizeof(struct ublksrv_ctrl_dev_info),
	};
	bool has_dev_path = false;
	int ret;

	if (ublk_is_unprivileged(dev) && _IOC_NR(data.cmd_op) == UBLK_CMD_GET_DEV_INFO)
		return -EINVAL;

	if (_IOC_NR(data.cmd_op) == UBLK_CMD_GET_DEV_INFO2) {
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
				dev->dev_info.dev_id);
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
		data.len = sizeof(buf);
		data.dev_path_len = UBLKC_PATH_MAX;
		data.addr = (__u64)buf;
		has_dev_path = true;
	}

	ret = __ublksrv_ctrl_cmd(dev, &data);
	if (ret >= 0 && has_dev_path)
		memcpy(&dev->dev_info, &buf[UBLKC_PATH_MAX],
				sizeof(dev->dev_info));
	return ret;
}

static int __ublksrv_ctrl_get_info(struct ublksrv_ctrl_dev *dev,
		unsigned cmd_op)
{
	unsigned new_code = legacy_op_to_ioctl(cmd_op);
	int ret = __ublksrv_ctrl_get_info_no_trans(dev, new_code);

	/*
	 * Try the ioctl cmd encoding first, then fall back to the legacy
	 * command opcode if the ioctl encoding fails
	 */
	if (ret < 0)
		ret = __ublksrv_ctrl_get_info_no_trans(dev, cmd_op);

	return ret;
}

/*
 * Deal with userspace/kernel compatibility
 *
 * 1) if the kernel is capable of handling UBLK_F_UNPRIVILEGED_DEV:
 *    - ublksrv supports UBLK_F_UNPRIVILEGED_DEV:
 *      ublksrv should send UBLK_CMD_GET_DEV_INFO2, given an unprivileged
 *      application may need to query devices it owns at any time, and the
 *      application has no idea whether UBLK_F_UNPRIVILEGED_DEV is set since
 *      the capability info is stateless and always retrieved via a control
 *      command
 *
 *    - ublksrv doesn't support UBLK_F_UNPRIVILEGED_DEV:
 *      UBLK_CMD_GET_DEV_INFO is always sent to the kernel, and the
 *      UBLK_F_UNPRIVILEGED_DEV feature isn't available to the user
 *
 * 2) if the kernel isn't capable of handling UBLK_F_UNPRIVILEGED_DEV:
 *    - ublksrv supports UBLK_F_UNPRIVILEGED_DEV:
 *      UBLK_CMD_GET_DEV_INFO2 is tried first and will fail, then
 *      UBLK_CMD_GET_DEV_INFO is retried, given UBLK_F_UNPRIVILEGED_DEV
 *      can't be set
 *
 *    - ublksrv doesn't support UBLK_F_UNPRIVILEGED_DEV:
 *      UBLK_CMD_GET_DEV_INFO is always sent to the kernel, and the
 *      UBLK_F_UNPRIVILEGED_DEV feature isn't available to the user
 */
int ublksrv_ctrl_get_info(struct ublksrv_ctrl_dev *dev)
{
	int ret;

	unsigned cmd_op =
#ifdef UBLK_CMD_GET_DEV_INFO2
		UBLK_CMD_GET_DEV_INFO2;
#else
		UBLK_CMD_GET_DEV_INFO;
#endif
	ret = __ublksrv_ctrl_get_info(dev, cmd_op);

	if (cmd_op == UBLK_CMD_GET_DEV_INFO)
		return ret;

	if (ret < 0) {
		/* unprivileged devices only support GET_DEV_INFO2 */
		if (ublk_is_unprivileged(dev))
			return ret;
		/*
		 * fall back to GET_DEV_INFO since the driver may not
		 * support GET_DEV_INFO2
		 */
		ret = __ublksrv_ctrl_get_info(dev, UBLK_CMD_GET_DEV_INFO);
	}

	return ret;
}

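/*
 * Ask the driver to stop the device and remove /dev/ublkbN; see the flow
 * described above __ublksrv_ctrl_add_dev().
 */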
int ublksrv_ctrl_stop_dev(struct ublksrv_ctrl_dev *dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_STOP_DEV,
	};
	int ret;

	ublk_un_privileged_prep_data(dev, data);

	ret = __ublksrv_ctrl_cmd(dev, &data);
	return ret;
}

static const char *ublksrv_dev_state_desc(struct ublksrv_ctrl_dev *dev)
{
	switch (dev->dev_info.state) {
	case UBLK_S_DEV_DEAD:
		return "DEAD";
	case UBLK_S_DEV_LIVE:
		return "LIVE";
	case UBLK_S_DEV_QUIESCED:
		return "QUIESCED";
	case UBLK_S_DEV_FAIL_IO:
		return "FAIL_IO";
	default:
		return "UNKNOWN";
	}
}

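/*
 * Print a human readable summary of the device; @jbuf, when non-NULL, is
 * the JSON buffer holding per-queue and target information.
 */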
void ublksrv_ctrl_dump(struct ublksrv_ctrl_dev *dev, const char *jbuf)
{
	struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	int i, ret;
	struct ublk_params p;

	ret = ublksrv_ctrl_get_params(dev, &p);
	if (ret < 0) {
		fprintf(stderr, "failed to get params: %s\n", strerror(-ret));
		return;
	}

	printf("dev id %d: nr_hw_queues %d queue_depth %d block size %d dev_capacity %llu\n",
			info->dev_id,
			info->nr_hw_queues, info->queue_depth,
			1 << p.basic.logical_bs_shift, p.basic.dev_sectors);
	printf("\tmax rq size %d daemon pid %d flags 0x%llx state %s\n",
			info->max_io_buf_bytes,
			info->ublksrv_pid, info->flags,
			ublksrv_dev_state_desc(dev));
	printf("\tublkc: %u:%u ublkb: %u:%u owner: %u:%u\n",
			p.devt.char_major, p.devt.char_minor,
			p.devt.disk_major, p.devt.disk_minor,
			info->owner_uid, info->owner_gid);

	if (jbuf) {
		char buf[512];

		for (i = 0; i < info->nr_hw_queues; i++) {
			unsigned tid;

			ublksrv_json_read_queue_info(jbuf, i, &tid, buf, 512);
			printf("\tqueue %u: tid %u affinity(%s)\n",
					i, tid, buf);
		}

		ublksrv_json_read_target_info(jbuf, buf, 512);
		printf("\ttarget %s\n", buf);
	}
}

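/*
 * For unprivileged devices the command buffer is the ublkc path followed
 * by the ublk_params struct; the driver strips the path before copying
 * the parameters.
 */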
int ublksrv_ctrl_set_params(struct ublksrv_ctrl_dev *dev,
		struct ublk_params *params)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_SET_PARAMS,
		.flags	= CTRL_CMD_HAS_BUF,
		.addr = (__u64)params,
		.len = sizeof(*params),
	};
	char buf[UBLKC_PATH_MAX + sizeof(*params)];

	params->len = sizeof(*params);

	if (ublk_is_unprivileged(dev)) {
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
				dev->dev_info.dev_id);
		memcpy(&buf[UBLKC_PATH_MAX], params, sizeof(*params));
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
		data.len = sizeof(buf);
		data.dev_path_len = UBLKC_PATH_MAX;
		data.addr = (__u64)buf;
	}

	return __ublksrv_ctrl_cmd(dev, &data);
}

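/*
 * GET_PARAMS mirrors SET_PARAMS: for unprivileged devices the result is
 * copied back from behind the ublkc path in the command buffer.
 */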
int ublksrv_ctrl_get_params(struct ublksrv_ctrl_dev *dev,
		struct ublk_params *params)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_GET_PARAMS,
		.flags	= CTRL_CMD_HAS_BUF,
		.addr = (__u64)params,
		.len = sizeof(*params),
	};
	char buf[UBLKC_PATH_MAX + sizeof(*params)];
	int ret;

	params->len = sizeof(*params);

	if (ublk_is_unprivileged(dev)) {
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
				dev->dev_info.dev_id);
		memcpy(&buf[UBLKC_PATH_MAX], params, sizeof(*params));
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
		data.len = sizeof(buf);
		data.dev_path_len = UBLKC_PATH_MAX;
		data.addr = (__u64)buf;
	}

	ret = __ublksrv_ctrl_cmd(dev, &data);
	if (ret >= 0 && ublk_is_unprivileged(dev))
		memcpy(params, &buf[UBLKC_PATH_MAX], sizeof(*params));

	return ret;
}

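/*
 * Ask the driver to enter user recovery so that a new daemon can
 * re-attach to the device.
 */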
int ublksrv_ctrl_start_recovery(struct ublksrv_ctrl_dev *dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_START_USER_RECOVERY,
		.flags	= 0,
	};
	int ret;

	ublk_un_privileged_prep_data(dev, data);

	ret = __ublksrv_ctrl_cmd(dev, &data);
	return ret;
}

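/*
 * Finish user recovery: pass the new daemon's pid to the driver so the
 * device can resume handling IO.
 */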
int ublksrv_ctrl_end_recovery(struct ublksrv_ctrl_dev *dev, int daemon_pid)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_END_USER_RECOVERY,
		.flags	= CTRL_CMD_HAS_DATA,
	};
	int ret;

	ublk_un_privileged_prep_data(dev, data);

	dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid;

	ret = __ublksrv_ctrl_cmd(dev, &data);
	return ret;
}

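/*
 * UBLK_U_CMD_GET_FEATURES is only defined with ioctl encoding and reports
 * the UBLK_F_* flags supported by the running driver.
 */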
int ublksrv_ctrl_get_features(struct ublksrv_ctrl_dev *dev,
		__u64 *features)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_U_CMD_GET_FEATURES,
		.flags	= CTRL_CMD_HAS_BUF | CTRL_CMD_NO_TRANS,
		.addr = (__u64)features,
		.len = sizeof(*features),
	};

	return __ublksrv_ctrl_cmd(dev, &data);
}

const struct ublksrv_ctrl_dev_info *ublksrv_ctrl_get_dev_info(
		const struct ublksrv_ctrl_dev *dev)
{
	return &dev->dev_info;
}

const char *ublksrv_ctrl_get_run_dir(const struct ublksrv_ctrl_dev *dev)
{
	return dev->run_dir;
}

void ublksrv_ctrl_prep_recovery(struct ublksrv_ctrl_dev *dev,
		const char *tgt_type, const struct ublksrv_tgt_type *tgt_ops,
		const char *recovery_jbuf)
{
	dev->tgt_type = tgt_type;
	dev->tgt_ops = tgt_ops;
	dev->tgt_argc = -1;
	dev->recovery_jbuf = recovery_jbuf;
}

const char *ublksrv_ctrl_get_recovery_jbuf(const struct ublksrv_ctrl_dev *dev)
{
	return dev->recovery_jbuf;
}