// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *	virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *	<socket>	:= vhost-user socket path to connect
 *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *	<platform_id>	:= (optional) platform device id
 *
 * example:
 *	virtio_uml.device=/var/uml.socket:1
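 *
 * or, additionally pinning the platform device id (here 2):
 *	virtio_uml.device=/var/uml.socket:1:2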
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
	int recv_rc;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

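/*
 * Send the whole buffer over @fd, retrying on short writes and -EINTR.
 * Any file descriptors in @fds are passed along with the first chunk
 * only; retries after a partial write send plain data. Returns 0 on
 * success or a negative error code.
 */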
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

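/*
 * Read exactly @len bytes from @fd, retrying on short reads and -EINTR.
 * If @abortable, -EAGAIN aborts the read; otherwise it is retried until
 * data arrives. Returns 0 on success, -ECONNRESET on EOF, or a negative
 * error code.
 */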
static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
				   int rc)
{
	struct virtio_uml_platform_data *pdata = vu_dev->pdata;

	if (rc != -ECONNRESET)
		return;

	if (!vu_dev->registered)
		return;

	vu_dev->registered = 0;

	schedule_work(&pdata->conn_broken_wk);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc) {
		vhost_user_check_reset(vu_dev, rc);
		return rc;
	}

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
	    VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}

static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}

static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}

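/*
 * Handle one request arriving on the slave request fd: a config-change
 * message sets config_changed_irq, a vring call marks the target
 * virtqueue in vq_irq_vq_map; vu_req_interrupt() then acts on either.
 * The read result is stashed in recv_rc for the interrupt handler, and
 * if the slave asked for a reply we send one with the response code
 * (0 on success, non-zero otherwise).
 */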
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	vu_dev->recv_rc = rc;
	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		vu_dev->config_changed_irq = true;
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (ev && !vu_dev->suspended)
		time_travel_add_irq_event(ev);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}

static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->recv_rc) {
		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
	} else if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}

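/*
 * Basic vhost-user handshake: claim the device (SET_OWNER), read the
 * device features, and - if VHOST_USER_F_PROTOCOL_FEATURES is offered -
 * negotiate the subset of protocol features we support. When the slave
 * supports the slave-request channel, also set up the request fd.
 */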
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}

static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}
	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);

	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}


/* Virtio interface */

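/*
 * Kick the device: when in-band notifications were negotiated there is
 * no kick eventfd, so send VHOST_USER_VRING_KICK over the socket
 * instead; otherwise write a counter increment to the per-queue kick
 * eventfd. Suspended queues are not kicked and simply report success.
 */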
static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}


static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

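/*
 * Keep the virtqueues enabled across suspend/resume. A minimal usage
 * sketch (the caller below is hypothetical, not from this file): a UML
 * virtio driver that must keep traffic flowing while suspended could do
 *
 *	virtio_uml_set_no_vq_suspend(vdev, true);
 *
 * from its probe routine, after which virtio_uml_suspend()/resume()
 * leave the vrings enabled.
 */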
void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}

static void vu_of_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

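/*
 * A minimal sketch of a devicetree node this function can parse; the
 * node name and values are illustrative, only the compatible string
 * (from virtio_uml_match below) and the property names come from this
 * driver:
 *
 *	virtio@0 {
 *		compatible = "virtio,uml";
 *		socket-path = "/var/uml.socket";
 *		virtio-device-id = <1>;
 *	};
 */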
static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc) {
		/*
		 * put_device() invokes virtio_uml_release_dev(), which
		 * frees vu_dev - don't touch it afterwards.
		 */
		put_device(&vu_dev->vdev.dev);
		return rc;
	}
	vu_dev->registered = 1;
	return 0;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

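/*
 * Parse one "device=" parameter of the form documented at the top of
 * this file. For example, "/var/uml.socket:1:2" yields the socket path
 * "/var/uml.socket", virtio device id 1 and platform device id 2; when
 * the platform id is omitted, the running vu_cmdline_id counter is used.
 */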
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);


static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

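/*
 * Suspend: unless a driver opted out via virtio_uml_set_no_vq_suspend(),
 * mark every virtqueue suspended and disable its vring, so the device
 * stops delivering buffers. If the device may wake the system, arm the
 * request-fd IRQ as a wake source instead of marking the device
 * suspended; resume below undoes both steps.
 */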
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");