/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM, which use specialized
 * VMCI page files in the VMX and support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                            |   |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, which uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: if the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa (see the illustrative helpers following the
 * QPBROKERSTATE_HAS_MEM macro below).
 */

/*
 * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
 * types are passed around to enqueue and dequeue routines.  Note that
 * often the functions passed are simply wrappers around memcpy
 * itself.
 *
 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
 * there's an unused last parameter for the hosted side.  In
 * ESX, that parameter holds a buffer type.
 */
typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
				      u64 queue_offset, const void *src,
				      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
					const struct vmci_queue *queue,
					u64 queue_offset, size_t size);

/* The kernel-specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
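
/*
 * Illustrative sketch, not part of the driver: the enum above lays the
 * *_NO_MEM and *_MEM values out as adjacent pairs, so toggling the
 * memory availability of a created/attached/shutdown state is a simple
 * increment or decrement, as noted in the state diagram comment.
 */
static inline enum qp_broker_state
qp_state_set_mem(enum qp_broker_state state)
{
	/* Assumes state is one of the *_NO_MEM states. */
	return (enum qp_broker_state)(state + 1);
}

static inline enum qp_broker_state
qp_state_clear_mem(enum qp_broker_state state)
{
	/* Assumes state is one of the *_MEM states. */
	return (enum qp_broker_state)(state - 1);
}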

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
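/*
 * Worked example (illustrative): if the host creates a queue pair with
 * a 64 KiB produce queue and a 16 KiB consume queue, the entry below
 * records produce_size = 16 KiB and consume_size = 64 KiB, i.e., the
 * sizes as the guest peer sees them.
 */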
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
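
/*
 * Worked example (illustrative): with 4 KiB pages, an entry with
 * produce_size = 64 KiB and consume_size = 16 KiB gives
 * QPE_NUM_PAGES = 16 + 4 + 2 = 22 -- one page per data page of each
 * queue, plus one header page for each of the two queues.
 */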


/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}
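
/*
 * Illustrative sketch, not part of the driver: a guest-side caller pairs
 * qp_alloc_queue() with qp_free_queue(), passing the same size (which
 * excludes the implicit header page) to both. Hypothetical helper:
 */
static inline void *qp_alloc_two_page_queue(void)
{
	/* Two data pages; qp_alloc_queue() adds the header page itself. */
	void *q = qp_alloc_queue(2 * PAGE_SIZE, 0);

	if (!q)
		return NULL;	/* Caller maps this to VMCI_ERROR_NO_MEM. */

	return q;	/* Release later with qp_free_queue(q, 2 * PAGE_SIZE). */
}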

/*
 * Copies from a given buffer or iovector to a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_to_queue(struct vmci_queue *queue,
				u64 queue_offset,
				const void *src,
				size_t size,
				bool is_iovec)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill the rest of this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (is_iovec) {
			struct msghdr *msg = (struct msghdr *)src;
			int err;

			/* The iovec will track bytes_copied internally. */
			err = memcpy_from_msg((u8 *)va + page_offset,
					      msg, to_copy);
			if (err != 0) {
				if (kernel_if->host)
					kunmap(kernel_if->u.h.page[page_index]);
				return VMCI_ERROR_INVALID_ARGS;
			}
		} else {
			memcpy((u8 *)va + page_offset,
			       (u8 *)src + bytes_copied, to_copy);
		}

		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_from_queue(void *dest,
				  const struct vmci_queue *queue,
				  u64 queue_offset,
				  size_t size,
				  bool is_iovec)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill the rest of this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (is_iovec) {
			struct msghdr *msg = dest;
			int err;

			/* The iovec will track bytes_copied internally. */
			err = memcpy_to_msg(msg, (u8 *)va + page_offset,
					     to_copy);
			if (err != 0) {
				if (kernel_if->host)
					kunmap(kernel_if->u.h.page[page_index]);
				return VMCI_ERROR_INVALID_ARGS;
			}
		} else {
			memcpy((u8 *)dest + bytes_copied,
			       (u8 *)va + page_offset, to_copy);
		}

		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u32 *produce_ppns;
	u32 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++) {
		unsigned long pfn;

		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		pfn = produce_ppns[i];

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*produce_ppns)
		    && pfn != produce_ppns[i])
			goto ppn_error;
	}

	for (i = 0; i < num_consume_pages; i++) {
		unsigned long pfn;

		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		pfn = consume_ppns[i];

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*consume_ppns)
		    && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;

 ppn_error:
	kfree(produce_ppns);
	kfree(consume_ppns);
	return VMCI_ERROR_INVALID_ARGS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	memcpy(call_buf, ppn_set->produce_ppns,
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf +
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
	       ppn_set->consume_ppns,
	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return VMCI_SUCCESS;
}
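
/*
 * Illustrative sketch, not part of the driver: the buffer that
 * qp_populate_ppn_set() fills is simply the produce-queue PPNs followed
 * by the consume-queue PPNs, each 32 bits wide, so a caller would size
 * it as follows.
 */
static inline size_t qp_ppn_set_buf_size(const struct ppn_set *ppn_set)
{
	return (size_t)(ppn_set->num_produce_pages +
			ppn_set->num_consume_pages) *
		sizeof(*ppn_set->produce_ppns);
}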

static int qp_memcpy_to_queue(struct vmci_queue *queue,
			      u64 queue_offset,
			      const void *src, size_t src_offset, size_t size)
{
	return __qp_memcpy_to_queue(queue, queue_offset,
				    (u8 *)src + src_offset, size, false);
}

static int qp_memcpy_from_queue(void *dest,
				size_t dest_offset,
				const struct vmci_queue *queue,
				u64 queue_offset, size_t size)
{
	return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
				      queue, queue_offset, size, false);
}

/*
 * Copies to a VMCI Queue from a given iovec.
 */
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
				  u64 queue_offset,
				  const void *msg,
				  size_t src_offset, size_t size)
{

	/*
	 * We ignore src_offset because src is really a struct iovec * and will
	 * maintain offset internally.
	 */
	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
}

/*
 * Copies to a given iovec from a VMCI Queue.
 */
static int qp_memcpy_from_queue_iov(void *dest,
				    size_t dest_offset,
				    const struct vmci_queue *queue,
				    u64 queue_offset, size_t size)
{
	/*
	 * We ignore dest_offset because dest is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface.  This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
		return NULL;

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}

/*
 * Acquire the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}
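
/*
 * Illustrative sketch, not part of the driver: callers bracket access to
 * the shared queue state with the acquire/release pair above. Because
 * produce_q and consume_q share one mutex, locking either queue locks
 * both. Hypothetical helper:
 */
static inline size_t qp_read_num_pages_locked(struct vmci_queue *produce_q)
{
	size_t num_pages;

	qp_acquire_queue_mutex(produce_q);
	num_pages = produce_q->kernel_if->num_pages;
	qp_release_queue_mutex(produce_q);

	return num_pages;
}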

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);

		page_cache_release(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages, 1,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			retval);
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages, 1,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			retval);
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differs in
	 * that we either get a single or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct.  Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles.  Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

/*
 * Helper to make a QueuePairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * sizeof(u32);
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a QueuePairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate.  We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet.  It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach.  We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones.  The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create.  We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create.  For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}
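
/*
 * Illustrative sketch, not part of the driver: a hypothetical guest
 * caller pairs qp_alloc_guest_work() with qp_detatch_guest_work() on
 * teardown. Error handling beyond the allocation itself is elided.
 */
static inline int qp_guest_roundtrip_example(void)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	int result;

	result = qp_alloc_guest_work(&handle, &produce_q, PAGE_SIZE,
				     &consume_q, PAGE_SIZE, VMCI_INVALID_ID,
				     0, VMCI_NO_PRIVILEGE_FLAGS);
	if (result < VMCI_SUCCESS)
		return result;

	/* ... exchange data through the queue pair ... */

	return qp_detatch_guest_work(handle);
}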
1358 
1359 /*
1360  * The first endpoint issuing a queue pair allocation will create the state
1361  * of the queue pair in the queue pair broker.
1362  *
1363  * If the creator is a guest, it will associate a VMX virtual address range
1364  * with the queue pair as specified by the page_store. For compatibility with
1365  * older VMX'en, that would use a separate step to set the VMX virtual
1366  * address range, the virtual address range can be registered later using
1367  * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
1368  * used.
1369  *
1370  * If the creator is the host, a page_store of NULL should be used as well,
1371  * since the host is not able to supply a page store for the queue pair.
1372  *
1373  * For older VMX and host callers, the queue pair will be created in the
1374  * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
1375  * created in VMCOQPB_CREATED_MEM state.
1376  */
qp_broker_create(struct vmci_handle handle,u32 peer,u32 flags,u32 priv_flags,u64 produce_size,u64 consume_size,struct vmci_qp_page_store * page_store,struct vmci_ctx * context,vmci_event_release_cb wakeup_cb,void * client_data,struct qp_broker_entry ** ent)1377 static int qp_broker_create(struct vmci_handle handle,
1378 			    u32 peer,
1379 			    u32 flags,
1380 			    u32 priv_flags,
1381 			    u64 produce_size,
1382 			    u64 consume_size,
1383 			    struct vmci_qp_page_store *page_store,
1384 			    struct vmci_ctx *context,
1385 			    vmci_event_release_cb wakeup_cb,
1386 			    void *client_data, struct qp_broker_entry **ent)
1387 {
1388 	struct qp_broker_entry *entry = NULL;
1389 	const u32 context_id = vmci_ctx_get_id(context);
1390 	bool is_local = flags & VMCI_QPFLAG_LOCAL;
1391 	int result;
1392 	u64 guest_produce_size;
1393 	u64 guest_consume_size;
1394 
1395 	/* Do not create if the caller asked not to. */
1396 	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1397 		return VMCI_ERROR_NOT_FOUND;
1398 
1399 	/*
1400 	 * Creator's context ID should match handle's context ID or the creator
1401 	 * must allow the context in handle's context ID as the "peer".
1402 	 */
1403 	if (handle.context != context_id && handle.context != peer)
1404 		return VMCI_ERROR_NO_ACCESS;
1405 
1406 	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1407 		return VMCI_ERROR_DST_UNREACHABLE;
1408 
1409 	/*
1410 	 * Creator's context ID for local queue pairs should match the
1411 	 * peer, if a peer is specified.
1412 	 */
1413 	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1414 		return VMCI_ERROR_NO_ACCESS;
1415 
1416 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1417 	if (!entry)
1418 		return VMCI_ERROR_NO_MEM;
1419 
1420 	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
1421 		/*
1422 		 * The queue pair broker entry stores values from the guest
1423 		 * point of view, so a creating host side endpoint should swap
1424 		 * produce and consume values -- unless it is a local queue
1425 		 * pair, in which case no swapping is necessary, since the local
1426 		 * attacher will swap queues.
1427 		 */
1428 
1429 		guest_produce_size = consume_size;
1430 		guest_consume_size = produce_size;
1431 	} else {
1432 		guest_produce_size = produce_size;
1433 		guest_consume_size = consume_size;
1434 	}
1435 
1436 	entry->qp.handle = handle;
1437 	entry->qp.peer = peer;
1438 	entry->qp.flags = flags;
1439 	entry->qp.produce_size = guest_produce_size;
1440 	entry->qp.consume_size = guest_consume_size;
1441 	entry->qp.ref_count = 1;
1442 	entry->create_id = context_id;
1443 	entry->attach_id = VMCI_INVALID_ID;
1444 	entry->state = VMCIQPB_NEW;
1445 	entry->require_trusted_attach =
1446 	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1447 	entry->created_by_trusted =
1448 	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1449 	entry->vmci_page_files = false;
1450 	entry->wakeup_cb = wakeup_cb;
1451 	entry->client_data = client_data;
1452 	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1453 	if (entry->produce_q == NULL) {
1454 		result = VMCI_ERROR_NO_MEM;
1455 		goto error;
1456 	}
1457 	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1458 	if (entry->consume_q == NULL) {
1459 		result = VMCI_ERROR_NO_MEM;
1460 		goto error;
1461 	}
1462 
1463 	qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1464 
1465 	INIT_LIST_HEAD(&entry->qp.list_item);
1466 
1467 	if (is_local) {
1468 		u8 *tmp;
1469 
1470 		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1471 					   PAGE_SIZE, GFP_KERNEL);
1472 		if (entry->local_mem == NULL) {
1473 			result = VMCI_ERROR_NO_MEM;
1474 			goto error;
1475 		}
1476 		entry->state = VMCIQPB_CREATED_MEM;
1477 		entry->produce_q->q_header = entry->local_mem;
1478 		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
1479 		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
1480 		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1481 	} else if (page_store) {
1482 		/*
1483 		 * The VMX already initialized the queue pair headers, so no
1484 		 * need for the kernel side to do that.
1485 		 */
1486 		result = qp_host_register_user_memory(page_store,
1487 						      entry->produce_q,
1488 						      entry->consume_q);
1489 		if (result < VMCI_SUCCESS)
1490 			goto error;
1491 
1492 		entry->state = VMCIQPB_CREATED_MEM;
1493 	} else {
1494 		/*
1495 		 * A create without a page_store may be either a host
1496 		 * side create (in which case we are waiting for the
1497 		 * guest side to supply the memory) or an old style
1498 		 * queue pair create (in which case we will expect a
1499 		 * set page store call as the next step).
1500 		 */
1501 		entry->state = VMCIQPB_CREATED_NO_MEM;
1502 	}
1503 
1504 	qp_list_add_entry(&qp_broker_list, &entry->qp);
1505 	if (ent != NULL)
1506 		*ent = entry;
1507 
1508 	/* Add to resource obj */
1509 	result = vmci_resource_add(&entry->resource,
1510 				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
1511 				   handle);
1512 	if (result != VMCI_SUCCESS) {
1513 		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1514 			handle.context, handle.resource, result);
1515 		goto error;
1516 	}
1517 
1518 	entry->qp.handle = vmci_resource_handle(&entry->resource);
1519 	if (is_local) {
1520 		vmci_q_header_init(entry->produce_q->q_header,
1521 				   entry->qp.handle);
1522 		vmci_q_header_init(entry->consume_q->q_header,
1523 				   entry->qp.handle);
1524 	}
1525 
1526 	vmci_ctx_qp_create(context, entry->qp.handle);
1527 
1528 	return VMCI_SUCCESS;
1529 
1530  error:
1531 	if (entry != NULL) {
1532 		qp_host_free_queue(entry->produce_q, guest_produce_size);
1533 		qp_host_free_queue(entry->consume_q, guest_consume_size);
1534 		kfree(entry);
1535 	}
1536 
1537 	return result;
1538 }
1539 
1540 /*
1541  * Enqueues an event datagram to notify the peer VM attached to
1542  * the given queue pair handle about attach/detach event by the
1543  * given VM.  Returns Payload size of datagram enqueued on
1544  * success, error code otherwise.
1545  */
qp_notify_peer(bool attach,struct vmci_handle handle,u32 my_id,u32 peer_id)1546 static int qp_notify_peer(bool attach,
1547 			  struct vmci_handle handle,
1548 			  u32 my_id,
1549 			  u32 peer_id)
1550 {
1551 	int rv;
1552 	struct vmci_event_qp ev;
1553 
1554 	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1555 	    peer_id == VMCI_INVALID_ID)
1556 		return VMCI_ERROR_INVALID_ARGS;
1557 
1558 	/*
1559 	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
1560 	 * number of pending events from the hypervisor to a given VM
1561 	 * otherwise a rogue VM could do an arbitrary number of attach
1562 	 * and detach operations causing memory pressure in the host
1563 	 * kernel.
1564 	 */
1565 
1566 	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1567 	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1568 					  VMCI_CONTEXT_RESOURCE_ID);
1569 	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1570 	ev.msg.event_data.event = attach ?
1571 	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1572 	ev.payload.handle = handle;
1573 	ev.payload.peer_id = my_id;
1574 
1575 	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1576 				    &ev.msg.hdr, false);
1577 	if (rv < VMCI_SUCCESS)
1578 		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1579 			attach ? "ATTACH" : "DETACH", peer_id);
1580 
1581 	return rv;
1582 }
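
/*
 * Illustrative sketch (not part of the driver): a peer that wants to
 * observe these notifications can subscribe to the corresponding
 * events through the public event API declared in vmw_vmci_api.h:
 *
 *	static void peer_event_cb(u32 sub_id,
 *				  const struct vmci_event_data *ed,
 *				  void *client_data)
 *	{
 *		pr_debug("queue pair peer event %d\n", ed->event);
 *	}
 *
 *	u32 sub_id;
 *	vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH, peer_event_cb,
 *			     NULL, &sub_id);
 */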
1583 
1584 /*
1585  * The second endpoint issuing a queue pair allocation will attach to
1586  * the queue pair registered with the queue pair broker.
1587  *
1588  * If the attacher is a guest, it will associate a VMX virtual address
1589  * range with the queue pair as specified by the page_store. At this
1590  * point, the already attached host endpoint may start using the queue
1591  * pair, and an attach event is sent to it. For compatibility with
1592  * older VMX'en, that used a separate step to set the VMX virtual
1593  * address range, the virtual address range can be registered later
1594  * using vmci_qp_broker_set_page_store. In that case, a page_store of
1595  * NULL should be used, and the attach event will be generated once
1596  * the actual page store has been set.
1597  *
1598  * If the attacher is the host, a page_store of NULL should be used as
1599  * well, since the page store information is already set by the guest.
1600  *
1601  * For new VMX and host callers, the queue pair will be moved to the
1602  * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
1603  * moved to the VMCIQPB_ATTACHED_NO_MEM state.
1604  */
1605 static int qp_broker_attach(struct qp_broker_entry *entry,
1606 			    u32 peer,
1607 			    u32 flags,
1608 			    u32 priv_flags,
1609 			    u64 produce_size,
1610 			    u64 consume_size,
1611 			    struct vmci_qp_page_store *page_store,
1612 			    struct vmci_ctx *context,
1613 			    vmci_event_release_cb wakeup_cb,
1614 			    void *client_data,
1615 			    struct qp_broker_entry **ent)
1616 {
1617 	const u32 context_id = vmci_ctx_get_id(context);
1618 	bool is_local = flags & VMCI_QPFLAG_LOCAL;
1619 	int result;
1620 
1621 	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1622 	    entry->state != VMCIQPB_CREATED_MEM)
1623 		return VMCI_ERROR_UNAVAILABLE;
1624 
1625 	if (is_local) {
1626 		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1627 		    context_id != entry->create_id) {
1628 			return VMCI_ERROR_INVALID_ARGS;
1629 		}
1630 	} else if (context_id == entry->create_id ||
1631 		   context_id == entry->attach_id) {
1632 		return VMCI_ERROR_ALREADY_EXISTS;
1633 	}
1634 
1635 	if (VMCI_CONTEXT_IS_VM(context_id) &&
1636 	    VMCI_CONTEXT_IS_VM(entry->create_id))
1637 		return VMCI_ERROR_DST_UNREACHABLE;
1638 
1639 	/*
1640 	 * If we are attaching from a restricted context then the queuepair
1641 	 * must have been created by a trusted endpoint.
1642 	 */
1643 	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1644 	    !entry->created_by_trusted)
1645 		return VMCI_ERROR_NO_ACCESS;
1646 
1647 	/*
1648 	 * If we are attaching to a queuepair that was created by a restricted
1649 	 * context then we must be trusted.
1650 	 */
1651 	if (entry->require_trusted_attach &&
1652 	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1653 		return VMCI_ERROR_NO_ACCESS;
1654 
1655 	/*
1656 	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
1657 	 * control check is not performed.
1658 	 */
1659 	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1660 		return VMCI_ERROR_NO_ACCESS;
1661 
1662 	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
1663 		/*
1664 		 * Do not attach if the caller doesn't support Host Queue Pairs
1665 		 * and a host created this queue pair.
1666 		 */
1667 
1668 		if (!vmci_ctx_supports_host_qp(context))
1669 			return VMCI_ERROR_INVALID_RESOURCE;
1670 
1671 	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
1672 		struct vmci_ctx *create_context;
1673 		bool supports_host_qp;
1674 
1675 		/*
1676 		 * Do not attach a host to a user created queue pair if that
1677 		 * user doesn't support host queue pair end points.
1678 		 */
1679 
1680 		create_context = vmci_ctx_get(entry->create_id);
1681 		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1682 		vmci_ctx_put(create_context);
1683 
1684 		if (!supports_host_qp)
1685 			return VMCI_ERROR_INVALID_RESOURCE;
1686 	}
1687 
1688 	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1689 		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1690 
1691 	if (context_id != VMCI_HOST_CONTEXT_ID) {
1692 		/*
1693 		 * The queue pair broker entry stores values from the guest
1694 		 * point of view, so an attaching guest should match the values
1695 		 * stored in the entry.
1696 		 */
1697 
1698 		if (entry->qp.produce_size != produce_size ||
1699 		    entry->qp.consume_size != consume_size) {
1700 			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1701 		}
1702 	} else if (entry->qp.produce_size != consume_size ||
1703 		   entry->qp.consume_size != produce_size) {
1704 		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1705 	}
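
	/*
	 * Example: the broker entry stores sizes from the guest point
	 * of view, so if a guest created the pair with a 64KB produce
	 * queue and a 4KB consume queue, a host endpoint must attach
	 * with produce_size 4KB and consume_size 64KB; the host
	 * produces into the queue that the guest consumes and vice
	 * versa.
	 */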
1706 
1707 	if (context_id != VMCI_HOST_CONTEXT_ID) {
1708 		/*
1709 		 * If a guest attached to a queue pair, it will supply
1710 		 * the backing memory.  If this is a pre NOVMVM vmx,
1711 		 * the backing memory will be supplied by calling
1712 		 * vmci_qp_broker_set_page_store() following the
1713 		 * return of the vmci_qp_broker_alloc() call. If it is
1714 		 * a vmx of version NOVMVM or later, the page store
1715 		 * must be supplied as part of the
1716 		 * vmci_qp_broker_alloc call.  Under all circumstances,
1717 		 * the initially created queue pair must not have any
1718 		 * memory associated with it already.
1719 		 */
1720 
1721 		if (entry->state != VMCIQPB_CREATED_NO_MEM)
1722 			return VMCI_ERROR_INVALID_ARGS;
1723 
1724 		if (page_store != NULL) {
1725 			/*
1726 			 * Patch up host state to point to guest
1727 			 * supplied memory. The VMX already
1728 			 * initialized the queue pair headers, so no
1729 			 * need for the kernel side to do that.
1730 			 */
1731 
1732 			result = qp_host_register_user_memory(page_store,
1733 							      entry->produce_q,
1734 							      entry->consume_q);
1735 			if (result < VMCI_SUCCESS)
1736 				return result;
1737 
1738 			entry->state = VMCIQPB_ATTACHED_MEM;
1739 		} else {
1740 			entry->state = VMCIQPB_ATTACHED_NO_MEM;
1741 		}
1742 	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
1743 		/*
1744 		 * The host side is attempting to attach to a queue
1745 		 * pair that doesn't have any memory associated with
1746 		 * it. This must be a pre NOVMVM vmx that hasn't set
1747 		 * the page store information yet, or a quiesced VM.
1748 		 */
1749 
1750 		return VMCI_ERROR_UNAVAILABLE;
1751 	} else {
1752 		/* The host side has successfully attached to a queue pair. */
1753 		entry->state = VMCIQPB_ATTACHED_MEM;
1754 	}
1755 
1756 	if (entry->state == VMCIQPB_ATTACHED_MEM) {
1757 		result =
1758 		    qp_notify_peer(true, entry->qp.handle, context_id,
1759 				   entry->create_id);
1760 		if (result < VMCI_SUCCESS)
1761 			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1762 				entry->create_id, entry->qp.handle.context,
1763 				entry->qp.handle.resource);
1764 	}
1765 
1766 	entry->attach_id = context_id;
1767 	entry->qp.ref_count++;
1768 	if (wakeup_cb) {
1769 		entry->wakeup_cb = wakeup_cb;
1770 		entry->client_data = client_data;
1771 	}
1772 
1773 	/*
1774 	 * When attaching to local queue pairs, the context already has
1775 	 * an entry tracking the queue pair, so don't add another one.
1776 	 */
1777 	if (!is_local)
1778 		vmci_ctx_qp_create(context, entry->qp.handle);
1779 
1780 	if (ent != NULL)
1781 		*ent = entry;
1782 
1783 	return VMCI_SUCCESS;
1784 }
1785 
1786 /*
1787  * Allocates a queue pair entry for use when setting up queue pair
1788  * endpoints on the host.
1789  */
1790 static int qp_broker_alloc(struct vmci_handle handle,
1791 			   u32 peer,
1792 			   u32 flags,
1793 			   u32 priv_flags,
1794 			   u64 produce_size,
1795 			   u64 consume_size,
1796 			   struct vmci_qp_page_store *page_store,
1797 			   struct vmci_ctx *context,
1798 			   vmci_event_release_cb wakeup_cb,
1799 			   void *client_data,
1800 			   struct qp_broker_entry **ent,
1801 			   bool *swap)
1802 {
1803 	const u32 context_id = vmci_ctx_get_id(context);
1804 	bool create;
1805 	struct qp_broker_entry *entry = NULL;
1806 	bool is_local = flags & VMCI_QPFLAG_LOCAL;
1807 	int result;
1808 
1809 	if (vmci_handle_is_invalid(handle) ||
1810 	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1811 	    !(produce_size || consume_size) ||
1812 	    !context || context_id == VMCI_INVALID_ID ||
1813 	    handle.context == VMCI_INVALID_ID) {
1814 		return VMCI_ERROR_INVALID_ARGS;
1815 	}
1816 
1817 	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1818 		return VMCI_ERROR_INVALID_ARGS;
1819 
1820 	/*
1821 	 * In the initial argument check, we ensure that non-vmkernel hosts
1822 	 * are not allowed to create local queue pairs.
1823 	 */
1824 
1825 	mutex_lock(&qp_broker_list.mutex);
1826 
1827 	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1828 		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1829 			 context_id, handle.context, handle.resource);
1830 		mutex_unlock(&qp_broker_list.mutex);
1831 		return VMCI_ERROR_ALREADY_EXISTS;
1832 	}
1833 
1834 	if (handle.resource != VMCI_INVALID_ID)
1835 		entry = qp_broker_handle_to_entry(handle);
1836 
1837 	if (!entry) {
1838 		create = true;
1839 		result =
1840 		    qp_broker_create(handle, peer, flags, priv_flags,
1841 				     produce_size, consume_size, page_store,
1842 				     context, wakeup_cb, client_data, ent);
1843 	} else {
1844 		create = false;
1845 		result =
1846 		    qp_broker_attach(entry, peer, flags, priv_flags,
1847 				     produce_size, consume_size, page_store,
1848 				     context, wakeup_cb, client_data, ent);
1849 	}
1850 
1851 	mutex_unlock(&qp_broker_list.mutex);
1852 
1853 	if (swap)
1854 		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1855 		    !(create && is_local);
1856 
1857 	return result;
1858 }
1859 
1860 /*
1861  * This function implements the kernel API for allocating a queue
1862  * pair.
1863  */
1864 static int qp_alloc_host_work(struct vmci_handle *handle,
1865 			      struct vmci_queue **produce_q,
1866 			      u64 produce_size,
1867 			      struct vmci_queue **consume_q,
1868 			      u64 consume_size,
1869 			      u32 peer,
1870 			      u32 flags,
1871 			      u32 priv_flags,
1872 			      vmci_event_release_cb wakeup_cb,
1873 			      void *client_data)
1874 {
1875 	struct vmci_handle new_handle;
1876 	struct vmci_ctx *context;
1877 	struct qp_broker_entry *entry;
1878 	int result;
1879 	bool swap;
1880 
1881 	if (vmci_handle_is_invalid(*handle)) {
1882 		new_handle = vmci_make_handle(
1883 			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1884 	} else
1885 		new_handle = *handle;
1886 
1887 	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1888 	entry = NULL;
1889 	result =
1890 	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
1891 			    produce_size, consume_size, NULL, context,
1892 			    wakeup_cb, client_data, &entry, &swap);
1893 	if (result == VMCI_SUCCESS) {
1894 		if (swap) {
1895 			/*
1896 			 * If this is a local queue pair, the attacher
1897 			 * will swap around produce and consume
1898 			 * queues.
1899 			 */
1900 
1901 			*produce_q = entry->consume_q;
1902 			*consume_q = entry->produce_q;
1903 		} else {
1904 			*produce_q = entry->produce_q;
1905 			*consume_q = entry->consume_q;
1906 		}
1907 
1908 		*handle = vmci_resource_handle(&entry->resource);
1909 	} else {
1910 		*handle = VMCI_INVALID_HANDLE;
1911 		pr_devel("queue pair broker failed to alloc (result=%d)\n",
1912 			 result);
1913 	}
1914 	vmci_ctx_put(context);
1915 	return result;
1916 }
1917 
1918 /*
1919  * Allocates a VMCI queue_pair. Only checks validity of input
1920  * arguments. The real work is done in the host or guest
1921  * specific function.
1922  */
1923 int vmci_qp_alloc(struct vmci_handle *handle,
1924 		  struct vmci_queue **produce_q,
1925 		  u64 produce_size,
1926 		  struct vmci_queue **consume_q,
1927 		  u64 consume_size,
1928 		  u32 peer,
1929 		  u32 flags,
1930 		  u32 priv_flags,
1931 		  bool guest_endpoint,
1932 		  vmci_event_release_cb wakeup_cb,
1933 		  void *client_data)
1934 {
1935 	if (!handle || !produce_q || !consume_q ||
1936 	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1937 		return VMCI_ERROR_INVALID_ARGS;
1938 
1939 	if (guest_endpoint) {
1940 		return qp_alloc_guest_work(handle, produce_q,
1941 					   produce_size, consume_q,
1942 					   consume_size, peer,
1943 					   flags, priv_flags);
1944 	} else {
1945 		return qp_alloc_host_work(handle, produce_q,
1946 					  produce_size, consume_q,
1947 					  consume_size, peer, flags,
1948 					  priv_flags, wakeup_cb, client_data);
1949 	}
1950 }
1951 
1952 /*
1953  * This function implements the host kernel API for detaching from
1954  * a queue pair.
1955  */
1956 static int qp_detatch_host_work(struct vmci_handle handle)
1957 {
1958 	int result;
1959 	struct vmci_ctx *context;
1960 
1961 	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1962 
1963 	result = vmci_qp_broker_detach(handle, context);
1964 
1965 	vmci_ctx_put(context);
1966 	return result;
1967 }
1968 
1969 /*
1970  * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1971  * Real work is done in the host or guest specific function.
1972  */
1973 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1974 {
1975 	if (vmci_handle_is_invalid(handle))
1976 		return VMCI_ERROR_INVALID_ARGS;
1977 
1978 	if (guest_endpoint)
1979 		return qp_detatch_guest_work(handle);
1980 	else
1981 		return qp_detatch_host_work(handle);
1982 }
1983 
1984 /*
1985  * Returns the entry from the head of the list. Assumes that the list is
1986  * locked.
1987  */
1988 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1989 {
1990 	if (!list_empty(&qp_list->head)) {
1991 		struct qp_entry *entry =
1992 		    list_first_entry(&qp_list->head, struct qp_entry,
1993 				     list_item);
1994 		return entry;
1995 	}
1996 
1997 	return NULL;
1998 }
1999 
2000 void vmci_qp_broker_exit(void)
2001 {
2002 	struct qp_entry *entry;
2003 	struct qp_broker_entry *be;
2004 
2005 	mutex_lock(&qp_broker_list.mutex);
2006 
2007 	while ((entry = qp_list_get_head(&qp_broker_list))) {
2008 		be = (struct qp_broker_entry *)entry;
2009 
2010 		qp_list_remove_entry(&qp_broker_list, entry);
2011 		kfree(be);
2012 	}
2013 
2014 	mutex_unlock(&qp_broker_list.mutex);
2015 }
2016 
2017 /*
2018  * Requests that a queue pair be allocated with the VMCI queue
2019  * pair broker. Allocates a queue pair entry if one does not
2020  * exist. Attaches to one if it exists, and retrieves the page
2021  * files backing that queue_pair.  Assumes that the queue pair
2022  * broker lock is held.
2023  */
2024 int vmci_qp_broker_alloc(struct vmci_handle handle,
2025 			 u32 peer,
2026 			 u32 flags,
2027 			 u32 priv_flags,
2028 			 u64 produce_size,
2029 			 u64 consume_size,
2030 			 struct vmci_qp_page_store *page_store,
2031 			 struct vmci_ctx *context)
2032 {
2033 	return qp_broker_alloc(handle, peer, flags, priv_flags,
2034 			       produce_size, consume_size,
2035 			       page_store, context, NULL, NULL, NULL, NULL);
2036 }
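
/*
 * Note: this wrapper passes no wakeup callback and no entry/swap out
 * parameters, since broker-side callers (for instance the host
 * device's queue pair allocation path) only need the result code.
 */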
2037 
2038 /*
2039  * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
2040  * step to add the UVAs of the VMX mapping of the queue pair. This function
2041  * provides backwards compatibility with such VMX'en, and takes care of
2042  * registering the page store for a queue pair previously allocated by the
2043  * VMX during create or attach. This function will move the queue pair state
2044  * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
2045  * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
2046  * attached state with memory, the queue pair is ready to be used by the
2047  * host peer, and an attach event will be generated.
2048  *
2049  * The queue pair broker lock is acquired internally.
2050  *
2051  * This function is only used by the hosted platform, since there is no
2052  * issue with backwards compatibility for vmkernel.
2053  */
2054 int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2055 				  u64 produce_uva,
2056 				  u64 consume_uva,
2057 				  struct vmci_ctx *context)
2058 {
2059 	struct qp_broker_entry *entry;
2060 	int result;
2061 	const u32 context_id = vmci_ctx_get_id(context);
2062 
2063 	if (vmci_handle_is_invalid(handle) || !context ||
2064 	    context_id == VMCI_INVALID_ID)
2065 		return VMCI_ERROR_INVALID_ARGS;
2066 
2067 	/*
2068 	 * We only support guest to host queue pairs, so the VMX must
2069 	 * supply UVAs for the mapped page files.
2070 	 */
2071 
2072 	if (produce_uva == 0 || consume_uva == 0)
2073 		return VMCI_ERROR_INVALID_ARGS;
2074 
2075 	mutex_lock(&qp_broker_list.mutex);
2076 
2077 	if (!vmci_ctx_qp_exists(context, handle)) {
2078 		pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2079 			context_id, handle.context, handle.resource);
2080 		result = VMCI_ERROR_NOT_FOUND;
2081 		goto out;
2082 	}
2083 
2084 	entry = qp_broker_handle_to_entry(handle);
2085 	if (!entry) {
2086 		result = VMCI_ERROR_NOT_FOUND;
2087 		goto out;
2088 	}
2089 
2090 	/*
2091 	 * If I'm the owner then I can set the page store.
2092 	 *
2093 	 * Or, if a host created the queue_pair and I'm the attached peer
2094 	 * then I can set the page store.
2095 	 */
2096 	if (entry->create_id != context_id &&
2097 	    (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2098 	     entry->attach_id != context_id)) {
2099 		result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2100 		goto out;
2101 	}
2102 
2103 	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2104 	    entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2105 		result = VMCI_ERROR_UNAVAILABLE;
2106 		goto out;
2107 	}
2108 
2109 	result = qp_host_get_user_memory(produce_uva, consume_uva,
2110 					 entry->produce_q, entry->consume_q);
2111 	if (result < VMCI_SUCCESS)
2112 		goto out;
2113 
2114 	result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2115 	if (result < VMCI_SUCCESS) {
2116 		qp_host_unregister_user_memory(entry->produce_q,
2117 					       entry->consume_q);
2118 		goto out;
2119 	}
2120 
2121 	if (entry->state == VMCIQPB_CREATED_NO_MEM)
2122 		entry->state = VMCIQPB_CREATED_MEM;
2123 	else
2124 		entry->state = VMCIQPB_ATTACHED_MEM;
2125 
2126 	entry->vmci_page_files = true;
2127 
2128 	if (entry->state == VMCIQPB_ATTACHED_MEM) {
2129 		result =
2130 		    qp_notify_peer(true, handle, context_id, entry->create_id);
2131 		if (result < VMCI_SUCCESS) {
2132 			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2133 				entry->create_id, entry->qp.handle.context,
2134 				entry->qp.handle.resource);
2135 		}
2136 	}
2137 
2138 	result = VMCI_SUCCESS;
2139  out:
2140 	mutex_unlock(&qp_broker_list.mutex);
2141 	return result;
2142 }
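
/*
 * Illustrative two-step flow for an old-style VMX (sketch only, with
 * made-up argument names):
 *
 *	vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *			     produce_size, consume_size,
 *			     NULL, context);
 *	...
 *	vmci_qp_broker_set_page_store(handle, produce_uva,
 *				      consume_uva, context);
 *
 * The first call leaves the pair in a *_NO_MEM state; the second
 * registers the UVAs and completes the transition to the
 * corresponding *_MEM state.
 */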
2143 
2144 /*
2145  * Resets saved queue headers for the given QP broker
2146  * entry. Should be used when guest memory becomes available
2147  * again, or the guest detaches.
2148  */
2149 static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2150 {
2151 	entry->produce_q->saved_header = NULL;
2152 	entry->consume_q->saved_header = NULL;
2153 }
2154 
2155 /*
2156  * The main entry point for detaching from a queue pair registered with the
2157  * queue pair broker. If more than one endpoint is attached to the queue
2158  * pair, the first endpoint will mainly decrement a reference count and
2159  * generate a notification to its peer. The last endpoint will clean up
2160  * the queue pair state registered with the broker.
2161  *
2162  * When a guest endpoint detaches, it will unmap and unregister the guest
2163  * memory backing the queue pair. If the host is still attached, it will
2164  * no longer be able to access the queue pair content.
2165  *
2166  * If the queue pair is already in a state where there is no memory
2167  * registered for the queue pair (any *_NO_MEM state), it will transition to
2168  * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
2169  * endpoint is the first of two endpoints to detach. If the host endpoint is
2170  * the first out of two to detach, the queue pair will move to the
2171  * VMCIQPB_SHUTDOWN_MEM state.
2172  */
2173 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2174 {
2175 	struct qp_broker_entry *entry;
2176 	const u32 context_id = vmci_ctx_get_id(context);
2177 	u32 peer_id;
2178 	bool is_local = false;
2179 	int result;
2180 
2181 	if (vmci_handle_is_invalid(handle) || !context ||
2182 	    context_id == VMCI_INVALID_ID) {
2183 		return VMCI_ERROR_INVALID_ARGS;
2184 	}
2185 
2186 	mutex_lock(&qp_broker_list.mutex);
2187 
2188 	if (!vmci_ctx_qp_exists(context, handle)) {
2189 		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2190 			 context_id, handle.context, handle.resource);
2191 		result = VMCI_ERROR_NOT_FOUND;
2192 		goto out;
2193 	}
2194 
2195 	entry = qp_broker_handle_to_entry(handle);
2196 	if (!entry) {
2197 		pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2198 			 context_id, handle.context, handle.resource);
2199 		result = VMCI_ERROR_NOT_FOUND;
2200 		goto out;
2201 	}
2202 
2203 	if (context_id != entry->create_id && context_id != entry->attach_id) {
2204 		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2205 		goto out;
2206 	}
2207 
2208 	if (context_id == entry->create_id) {
2209 		peer_id = entry->attach_id;
2210 		entry->create_id = VMCI_INVALID_ID;
2211 	} else {
2212 		peer_id = entry->create_id;
2213 		entry->attach_id = VMCI_INVALID_ID;
2214 	}
2215 	entry->qp.ref_count--;
2216 
2217 	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2218 
2219 	if (context_id != VMCI_HOST_CONTEXT_ID) {
2220 		bool headers_mapped;
2221 
2222 		/*
2223 		 * Pre NOVMVM vmx'en may detach from a queue pair
2224 		 * before setting the page store, and in that case
2225 		 * there is no user memory to detach from. Also, more
2226 		 * recent VMX'en may detach from a queue pair in the
2227 		 * quiesced state.
2228 		 */
2229 
2230 		qp_acquire_queue_mutex(entry->produce_q);
2231 		headers_mapped = entry->produce_q->q_header ||
2232 		    entry->consume_q->q_header;
2233 		if (QPBROKERSTATE_HAS_MEM(entry)) {
2234 			result =
2235 			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2236 						 entry->produce_q,
2237 						 entry->consume_q);
2238 			if (result < VMCI_SUCCESS)
2239 				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2240 					handle.context, handle.resource,
2241 					result);
2242 
2243 			qp_host_unregister_user_memory(entry->produce_q,
2244 						       entry->consume_q);
2251 
2252 		}
2253 
2254 		if (!headers_mapped)
2255 			qp_reset_saved_headers(entry);
2256 
2257 		qp_release_queue_mutex(entry->produce_q);
2258 
2259 		if (!headers_mapped && entry->wakeup_cb)
2260 			entry->wakeup_cb(entry->client_data);
2261 
2262 	} else {
2263 		if (entry->wakeup_cb) {
2264 			entry->wakeup_cb = NULL;
2265 			entry->client_data = NULL;
2266 		}
2267 	}
2268 
2269 	if (entry->qp.ref_count == 0) {
2270 		qp_list_remove_entry(&qp_broker_list, &entry->qp);
2271 
2272 		if (is_local)
2273 			kfree(entry->local_mem);
2274 
2275 		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2276 		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2277 		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2278 		/* Unlink from resource hash table and free callback */
2279 		vmci_resource_remove(&entry->resource);
2280 
2281 		kfree(entry);
2282 
2283 		vmci_ctx_qp_destroy(context, handle);
2284 	} else {
2285 		qp_notify_peer(false, handle, context_id, peer_id);
2286 		if (context_id == VMCI_HOST_CONTEXT_ID &&
2287 		    QPBROKERSTATE_HAS_MEM(entry)) {
2288 			entry->state = VMCIQPB_SHUTDOWN_MEM;
2289 		} else {
2290 			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2291 		}
2292 
2293 		if (!is_local)
2294 			vmci_ctx_qp_destroy(context, handle);
2295 
2296 	}
2297 	result = VMCI_SUCCESS;
2298  out:
2299 	mutex_unlock(&qp_broker_list.mutex);
2300 	return result;
2301 }
2302 
2303 /*
2304  * Establishes the necessary mappings for a queue pair given a
2305  * reference to the queue pair guest memory. This is usually
2306  * called when a guest is unquiesced and the VMX is allowed to
2307  * map guest memory once again.
2308  */
2309 int vmci_qp_broker_map(struct vmci_handle handle,
2310 		       struct vmci_ctx *context,
2311 		       u64 guest_mem)
2312 {
2313 	struct qp_broker_entry *entry;
2314 	const u32 context_id = vmci_ctx_get_id(context);
2315 	bool is_local = false;
2316 	int result;
2317 
2318 	if (vmci_handle_is_invalid(handle) || !context ||
2319 	    context_id == VMCI_INVALID_ID)
2320 		return VMCI_ERROR_INVALID_ARGS;
2321 
2322 	mutex_lock(&qp_broker_list.mutex);
2323 
2324 	if (!vmci_ctx_qp_exists(context, handle)) {
2325 		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2326 			 context_id, handle.context, handle.resource);
2327 		result = VMCI_ERROR_NOT_FOUND;
2328 		goto out;
2329 	}
2330 
2331 	entry = qp_broker_handle_to_entry(handle);
2332 	if (!entry) {
2333 		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2334 			 context_id, handle.context, handle.resource);
2335 		result = VMCI_ERROR_NOT_FOUND;
2336 		goto out;
2337 	}
2338 
2339 	if (context_id != entry->create_id && context_id != entry->attach_id) {
2340 		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2341 		goto out;
2342 	}
2343 
2344 	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2345 	result = VMCI_SUCCESS;
2346 
2347 	if (context_id != VMCI_HOST_CONTEXT_ID &&
2348 	    !QPBROKERSTATE_HAS_MEM(entry)) {
2349 		struct vmci_qp_page_store page_store;
2350 
2351 		page_store.pages = guest_mem;
2352 		page_store.len = QPE_NUM_PAGES(entry->qp);
2353 
2354 		qp_acquire_queue_mutex(entry->produce_q);
2355 		qp_reset_saved_headers(entry);
2356 		result =
2357 		    qp_host_register_user_memory(&page_store,
2358 						 entry->produce_q,
2359 						 entry->consume_q);
2360 		qp_release_queue_mutex(entry->produce_q);
2361 		if (result == VMCI_SUCCESS) {
2362 			/* Move state from *_NO_MEM to *_MEM */
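			/*
			 * This relies on each *_MEM state directly
			 * following its *_NO_MEM counterpart in the
			 * broker state enum.
			 */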
2363 
2364 			entry->state++;
2365 
2366 			if (entry->wakeup_cb)
2367 				entry->wakeup_cb(entry->client_data);
2368 		}
2369 	}
2370 
2371  out:
2372 	mutex_unlock(&qp_broker_list.mutex);
2373 	return result;
2374 }
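
/*
 * vmci_qp_broker_unmap() below is the counterpart of this function; a
 * typical quiesce cycle (sketch, with made-up variables) is
 *
 *	vmci_qp_broker_unmap(handle, context, gid);
 *	...
 *	vmci_qp_broker_map(handle, context, guest_mem);
 *
 * moving the entry from a *_MEM state to *_NO_MEM and back.
 */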
2375 
2376 /*
2377  * Saves a snapshot of the queue headers for the given QP broker
2378  * entry. Should be used when guest memory is unmapped.
2379  * Results:
2380  * VMCI_SUCCESS on success, appropriate error code if guest memory
2381  * can't be accessed.
2382  */
2383 static int qp_save_headers(struct qp_broker_entry *entry)
2384 {
2385 	int result;
2386 
2387 	if (entry->produce_q->saved_header != NULL &&
2388 	    entry->consume_q->saved_header != NULL) {
2389 		/*
2390 		 *  If the headers have already been saved, we don't need to do
2391 		 *  it again, and we don't want to map in the headers
2392 		 *  unnecessarily.
2393 		 */
2394 
2395 		return VMCI_SUCCESS;
2396 	}
2397 
2398 	if (NULL == entry->produce_q->q_header ||
2399 	    NULL == entry->consume_q->q_header) {
2400 		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2401 		if (result < VMCI_SUCCESS)
2402 			return result;
2403 	}
2404 
2405 	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2406 	       sizeof(entry->saved_produce_q));
2407 	entry->produce_q->saved_header = &entry->saved_produce_q;
2408 	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2409 	       sizeof(entry->saved_consume_q));
2410 	entry->consume_q->saved_header = &entry->saved_consume_q;
2411 
2412 	return VMCI_SUCCESS;
2413 }
2414 
2415 /*
2416  * Removes all references to the guest memory of a given queue pair, and
2417  * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2418  * called when a VM is being quiesced and access to guest memory should
2419  * be avoided.
2420  */
2421 int vmci_qp_broker_unmap(struct vmci_handle handle,
2422 			 struct vmci_ctx *context,
2423 			 u32 gid)
2424 {
2425 	struct qp_broker_entry *entry;
2426 	const u32 context_id = vmci_ctx_get_id(context);
2427 	bool is_local = false;
2428 	int result;
2429 
2430 	if (vmci_handle_is_invalid(handle) || !context ||
2431 	    context_id == VMCI_INVALID_ID)
2432 		return VMCI_ERROR_INVALID_ARGS;
2433 
2434 	mutex_lock(&qp_broker_list.mutex);
2435 
2436 	if (!vmci_ctx_qp_exists(context, handle)) {
2437 		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2438 			 context_id, handle.context, handle.resource);
2439 		result = VMCI_ERROR_NOT_FOUND;
2440 		goto out;
2441 	}
2442 
2443 	entry = qp_broker_handle_to_entry(handle);
2444 	if (!entry) {
2445 		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2446 			 context_id, handle.context, handle.resource);
2447 		result = VMCI_ERROR_NOT_FOUND;
2448 		goto out;
2449 	}
2450 
2451 	if (context_id != entry->create_id && context_id != entry->attach_id) {
2452 		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2453 		goto out;
2454 	}
2455 
2456 	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2457 
2458 	if (context_id != VMCI_HOST_CONTEXT_ID &&
2459 	    QPBROKERSTATE_HAS_MEM(entry)) {
2460 		qp_acquire_queue_mutex(entry->produce_q);
2461 		result = qp_save_headers(entry);
2462 		if (result < VMCI_SUCCESS)
2463 			pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2464 				handle.context, handle.resource, result);
2465 
2466 		qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2467 
2468 		/*
2469 		 * On hosted, when we unmap queue pairs, the VMX will also
2470 		 * unmap the guest memory, so we invalidate the previously
2471 		 * registered memory. If the queue pair is mapped again at a
2472 		 * later point in time, we will need to reregister the user
2473 		 * memory with a possibly new user VA.
2474 		 */
2475 		qp_host_unregister_user_memory(entry->produce_q,
2476 					       entry->consume_q);
2477 
2478 		/*
2479 		 * Move state from *_MEM to *_NO_MEM.
2480 		 */
2481 		entry->state--;
2482 
2483 		qp_release_queue_mutex(entry->produce_q);
2484 	}
2485 
2486 	result = VMCI_SUCCESS;
2487 
2488  out:
2489 	mutex_unlock(&qp_broker_list.mutex);
2490 	return result;
2491 }
2492 
2493 /*
2494  * Destroys all guest queue pair endpoints. If active guest queue
2495  * pairs still exist, hypercalls to attempt detach from these
2496  * queue pairs will be made. Any failure to detach is silently
2497  * ignored.
2498  */
2499 void vmci_qp_guest_endpoints_exit(void)
2500 {
2501 	struct qp_entry *entry;
2502 	struct qp_guest_endpoint *ep;
2503 
2504 	mutex_lock(&qp_guest_endpoints.mutex);
2505 
2506 	while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2507 		ep = (struct qp_guest_endpoint *)entry;
2508 
2509 		/* Don't make a hypercall for local queue_pairs. */
2510 		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2511 			qp_detatch_hypercall(entry->handle);
2512 
2513 		/* We cannot fail the exit, so let's reset ref_count. */
2514 		entry->ref_count = 0;
2515 		qp_list_remove_entry(&qp_guest_endpoints, entry);
2516 
2517 		qp_guest_endpoint_destroy(ep);
2518 	}
2519 
2520 	mutex_unlock(&qp_guest_endpoints.mutex);
2521 }
2522 
2523 /*
2524  * Helper routine that will lock the queue pair before subsequent
2525  * operations.
2526  * Note: Non-blocking on the host side is currently only implemented in ESX.
2527  * Since non-blocking isn't yet implemented on the host personality, we
2528  * have no reason to acquire a spin lock; to avoid an unnecessary lock,
2529  * we only acquire the mutex, since we can always block.
2530  */
2531 static void qp_lock(const struct vmci_qp *qpair)
2532 {
2533 	qp_acquire_queue_mutex(qpair->produce_q);
2534 }
2535 
2536 /*
2537  * Helper routine that unlocks the queue pair after calling
2538  * qp_lock.
2539  */
2540 static void qp_unlock(const struct vmci_qp *qpair)
2541 {
2542 	qp_release_queue_mutex(qpair->produce_q);
2543 }
2544 
2545 /*
2546  * The queue headers may not be mapped at all times. If a queue is
2547  * currently not mapped, an attempt will be made to map it.
2548  */
2549 static int qp_map_queue_headers(struct vmci_queue *produce_q,
2550 				struct vmci_queue *consume_q)
2551 {
2552 	int result;
2553 
2554 	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2555 		result = qp_host_map_queues(produce_q, consume_q);
2556 		if (result < VMCI_SUCCESS)
2557 			return (produce_q->saved_header &&
2558 				consume_q->saved_header) ?
2559 			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
2560 			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2561 	}
2562 
2563 	return VMCI_SUCCESS;
2564 }
2565 
2566 /*
2567  * Helper routine that will retrieve the produce and consume
2568  * headers of a given queue pair. If the guest memory of the
2569  * queue pair is currently not available, the saved queue headers
2570  * will be returned, if these are available.
2571  */
2572 static int qp_get_queue_headers(const struct vmci_qp *qpair,
2573 				struct vmci_queue_header **produce_q_header,
2574 				struct vmci_queue_header **consume_q_header)
2575 {
2576 	int result;
2577 
2578 	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2579 	if (result == VMCI_SUCCESS) {
2580 		*produce_q_header = qpair->produce_q->q_header;
2581 		*consume_q_header = qpair->consume_q->q_header;
2582 	} else if (qpair->produce_q->saved_header &&
2583 		   qpair->consume_q->saved_header) {
2584 		*produce_q_header = qpair->produce_q->saved_header;
2585 		*consume_q_header = qpair->consume_q->saved_header;
2586 		result = VMCI_SUCCESS;
2587 	}
2588 
2589 	return result;
2590 }
2591 
2592 /*
2593  * Callback from VMCI queue pair broker indicating that a queue
2594  * pair that was previously not ready, now either is ready or
2595  * gone forever.
2596  */
2597 static int qp_wakeup_cb(void *client_data)
2598 {
2599 	struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2600 
2601 	qp_lock(qpair);
2602 	while (qpair->blocked > 0) {
2603 		qpair->blocked--;
2604 		qpair->generation++;
2605 		wake_up(&qpair->event);
2606 	}
2607 	qp_unlock(qpair);
2608 
2609 	return VMCI_SUCCESS;
2610 }
2611 
2612 /*
2613  * Makes the calling thread wait for the queue pair to become
2614  * ready for host side access.  Returns true when thread is
2615  * woken up after queue pair state change, false otherwise.
2616  */
2617 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2618 {
2619 	unsigned int generation;
2620 
2621 	qpair->blocked++;
2622 	generation = qpair->generation;
2623 	qp_unlock(qpair);
2624 	wait_event(qpair->event, generation != qpair->generation);
2625 	qp_lock(qpair);
2626 
2627 	return true;
2628 }
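
/*
 * The blocked/generation counters implement a simple wait/wake
 * protocol with qp_wakeup_cb(): a waiter increments blocked and
 * records the current generation before sleeping, and the wakeup
 * callback bumps generation once per blocked waiter, so every waiter
 * observes a generation change and is woken exactly once.
 */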
2629 
2630 /*
2631  * Enqueues a given buffer to the produce queue using the provided
2632  * function. As many bytes as possible (space available in the queue)
2633  * are enqueued.  Assumes the queue->mutex has been acquired.  Returns
2634  * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2635  * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2636  * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2637  * an error occurred when accessing the buffer,
2638  * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2639  * available.  Otherwise, the number of bytes written to the queue is
2640  * returned.  Updates the tail pointer of the produce queue.
2641  */
2642 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2643 				 struct vmci_queue *consume_q,
2644 				 const u64 produce_q_size,
2645 				 const void *buf,
2646 				 size_t buf_size,
2647 				 vmci_memcpy_to_queue_func memcpy_to_queue)
2648 {
2649 	s64 free_space;
2650 	u64 tail;
2651 	size_t written;
2652 	ssize_t result;
2653 
2654 	result = qp_map_queue_headers(produce_q, consume_q);
2655 	if (unlikely(result != VMCI_SUCCESS))
2656 		return result;
2657 
2658 	free_space = vmci_q_header_free_space(produce_q->q_header,
2659 					      consume_q->q_header,
2660 					      produce_q_size);
2661 	if (free_space == 0)
2662 		return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2663 
2664 	if (free_space < VMCI_SUCCESS)
2665 		return (ssize_t) free_space;
2666 
2667 	written = (size_t) (free_space > buf_size ? buf_size : free_space);
2668 	tail = vmci_q_header_producer_tail(produce_q->q_header);
2669 	if (likely(tail + written < produce_q_size)) {
2670 		result = memcpy_to_queue(produce_q, tail, buf, 0, written);
2671 	} else {
2672 		/* Tail pointer wraps around. */
2673 
2674 		const size_t tmp = (size_t) (produce_q_size - tail);
2675 
2676 		result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
2677 		if (result >= VMCI_SUCCESS)
2678 			result = memcpy_to_queue(produce_q, 0, buf, tmp,
2679 						 written - tmp);
2680 	}
2681 
2682 	if (result < VMCI_SUCCESS)
2683 		return result;
2684 
2685 	vmci_q_header_add_producer_tail(produce_q->q_header, written,
2686 					produce_q_size);
2687 	return written;
2688 }
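
/*
 * Worked wrap-around example for the enqueue path above: with a
 * 4096 byte produce queue, tail at 3000 and 2000 writable bytes, the
 * first memcpy_to_queue() covers queue bytes 3000..4095 (1096 bytes
 * from buffer offset 0), the second covers queue bytes 0..903 (904
 * bytes from buffer offset 1096), and the tail is then advanced to
 * (3000 + 2000) mod 4096 = 904.
 */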
2689 
2690 /*
2691  * Dequeues data (if available) from the given consume queue. Writes data
2692  * to the user provided buffer using the provided function.
2693  * Assumes the queue->mutex has been acquired.
2694  * Results:
2695  * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2696  * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2697  * (as defined by the queue size).
2698  * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2699  * Otherwise the number of bytes dequeued is returned.
2700  * Side effects:
2701  * Updates the head pointer of the consume queue.
2702  */
2703 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2704 				 struct vmci_queue *consume_q,
2705 				 const u64 consume_q_size,
2706 				 void *buf,
2707 				 size_t buf_size,
2708 				 vmci_memcpy_from_queue_func memcpy_from_queue,
2709 				 bool update_consumer)
2710 {
2711 	s64 buf_ready;
2712 	u64 head;
2713 	size_t read;
2714 	ssize_t result;
2715 
2716 	result = qp_map_queue_headers(produce_q, consume_q);
2717 	if (unlikely(result != VMCI_SUCCESS))
2718 		return result;
2719 
2720 	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2721 					    produce_q->q_header,
2722 					    consume_q_size);
2723 	if (buf_ready == 0)
2724 		return VMCI_ERROR_QUEUEPAIR_NODATA;
2725 
2726 	if (buf_ready < VMCI_SUCCESS)
2727 		return (ssize_t) buf_ready;
2728 
2729 	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2730 	head = vmci_q_header_consumer_head(produce_q->q_header);
2731 	if (likely(head + read < consume_q_size)) {
2732 		result = memcpy_from_queue(buf, 0, consume_q, head, read);
2733 	} else {
2734 		/* Head pointer wraps around. */
2735 
2736 		const size_t tmp = (size_t) (consume_q_size - head);
2737 
2738 		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2739 		if (result >= VMCI_SUCCESS)
2740 			result = memcpy_from_queue(buf, tmp, consume_q, 0,
2741 						   read - tmp);
2742 
2743 	}
2744 
2745 	if (result < VMCI_SUCCESS)
2746 		return result;
2747 
2748 	if (update_consumer)
2749 		vmci_q_header_add_consumer_head(produce_q->q_header,
2750 						read, consume_q_size);
2751 
2752 	return read;
2753 }
2754 
2755 /*
2756  * vmci_qpair_alloc() - Allocates a queue pair.
2757  * @qpair:      Pointer for the new vmci_qp struct.
2758  * @handle:     Handle to track the resource.
2759  * @produce_qsize:      Desired size of the producer queue.
2760  * @consume_qsize:      Desired size of the consumer queue.
2761  * @peer:       ContextID of the peer.
2762  * @flags:      VMCI flags.
2763  * @priv_flags: VMCI privilege flags.
2764  *
2765  * This is the client interface for allocating the memory for a
2766  * vmci_qp structure and then attaching to the underlying
2767  * queue.  If an error occurs allocating the memory for the
2768  * vmci_qp structure no attempt is made to attach.  If an
2769  * error occurs attaching, then the structure is freed.
2770  */
2771 int vmci_qpair_alloc(struct vmci_qp **qpair,
2772 		     struct vmci_handle *handle,
2773 		     u64 produce_qsize,
2774 		     u64 consume_qsize,
2775 		     u32 peer,
2776 		     u32 flags,
2777 		     u32 priv_flags)
2778 {
2779 	struct vmci_qp *my_qpair;
2780 	int retval;
2781 	struct vmci_handle src = VMCI_INVALID_HANDLE;
2782 	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2783 	enum vmci_route route;
2784 	vmci_event_release_cb wakeup_cb;
2785 	void *client_data;
2786 
2787 	/*
2788 	 * Restrict the size of a queuepair.  The device already
2789 	 * enforces a limit on the total amount of memory that can be
2790 	 * allocated to queuepairs for a guest.  However, we try to
2791 	 * allocate this memory before we make the queuepair
2792 	 * allocation hypercall.  On Linux, we allocate each page
2793 	 * separately, which means rather than fail, the guest will
2794 	 * thrash while it tries to allocate, and will become
2795 	 * increasingly unresponsive to the point where it appears to
2796 	 * be hung.  So we place a limit on the size of an individual
2797 	 * queuepair here, and leave the device to enforce the
2798 	 * restriction on total queuepair memory.  (Note that this
2799 	 * doesn't prevent all cases; a user with only this much
2800 	 * physical memory could still get into trouble.)  The error
2801 	 * used by the device is NO_RESOURCES, so use that here too.
2802 	 */
2803 
2804 	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2805 	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2806 		return VMCI_ERROR_NO_RESOURCES;
2807 
2808 	retval = vmci_route(&src, &dst, false, &route);
2809 	if (retval < VMCI_SUCCESS)
2810 		route = vmci_guest_code_active() ?
2811 		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2812 
2813 	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2814 		pr_devel("NONBLOCK OR PINNED set");
2815 		return VMCI_ERROR_INVALID_ARGS;
2816 	}
2817 
2818 	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2819 	if (!my_qpair)
2820 		return VMCI_ERROR_NO_MEM;
2821 
2822 	my_qpair->produce_q_size = produce_qsize;
2823 	my_qpair->consume_q_size = consume_qsize;
2824 	my_qpair->peer = peer;
2825 	my_qpair->flags = flags;
2826 	my_qpair->priv_flags = priv_flags;
2827 
2828 	wakeup_cb = NULL;
2829 	client_data = NULL;
2830 
2831 	if (VMCI_ROUTE_AS_HOST == route) {
2832 		my_qpair->guest_endpoint = false;
2833 		if (!(flags & VMCI_QPFLAG_LOCAL)) {
2834 			my_qpair->blocked = 0;
2835 			my_qpair->generation = 0;
2836 			init_waitqueue_head(&my_qpair->event);
2837 			wakeup_cb = qp_wakeup_cb;
2838 			client_data = (void *)my_qpair;
2839 		}
2840 	} else {
2841 		my_qpair->guest_endpoint = true;
2842 	}
2843 
2844 	retval = vmci_qp_alloc(handle,
2845 			       &my_qpair->produce_q,
2846 			       my_qpair->produce_q_size,
2847 			       &my_qpair->consume_q,
2848 			       my_qpair->consume_q_size,
2849 			       my_qpair->peer,
2850 			       my_qpair->flags,
2851 			       my_qpair->priv_flags,
2852 			       my_qpair->guest_endpoint,
2853 			       wakeup_cb, client_data);
2854 
2855 	if (retval < VMCI_SUCCESS) {
2856 		kfree(my_qpair);
2857 		return retval;
2858 	}
2859 
2860 	*qpair = my_qpair;
2861 	my_qpair->handle = *handle;
2862 
2863 	return retval;
2864 }
2865 EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
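
/*
 * Minimal usage sketch (illustrative only; peer_cid and the error
 * handling are made up, the vmci_qpair_* calls are real):
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int err;
 *
 *	err = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
 *			       0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (err < VMCI_SUCCESS)
 *		return err;
 *	...
 *	vmci_qpair_detach(&qpair);
 */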
2866 
2867 /*
2868  * vmci_qpair_detach() - Detaches the client from a queue pair.
2869  * @qpair:      Reference of a pointer to the qpair struct.
2870  *
2871  * This is the client interface for detaching from a VMCIQPair.
2872  * Note that this routine will free the memory allocated for the
2873  * vmci_qp structure too.
2874  */
2875 int vmci_qpair_detach(struct vmci_qp **qpair)
2876 {
2877 	int result;
2878 	struct vmci_qp *old_qpair;
2879 
2880 	if (!qpair || !(*qpair))
2881 		return VMCI_ERROR_INVALID_ARGS;
2882 
2883 	old_qpair = *qpair;
2884 	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2885 
2886 	/*
2887 	 * The guest can fail to detach for a number of reasons, and
2888 	 * if it does so, it will cleanup the entry (if there is one).
2889 	 * The host can fail too, but it won't cleanup the entry
2890 	 * immediately, it will do that later when the context is
2891 	 * freed.  Either way, we need to release the qpair struct
2892 	 * here; there isn't much the caller can do, and we don't want
2893 	 * to leak.
2894 	 */
2895 
2896 	memset(old_qpair, 0, sizeof(*old_qpair));
2897 	old_qpair->handle = VMCI_INVALID_HANDLE;
2898 	old_qpair->peer = VMCI_INVALID_ID;
2899 	kfree(old_qpair);
2900 	*qpair = NULL;
2901 
2902 	return result;
2903 }
2904 EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2905 
2906 /*
2907  * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2908  * @qpair:      Pointer to the queue pair struct.
2909  * @producer_tail:      Reference used for storing producer tail index.
2910  * @consumer_head:      Reference used for storing the consumer head index.
2911  *
2912  * This is the client interface for getting the current indexes of the
2913  * QPair from the point of view of the caller as the producer.
2914  */
2915 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2916 				   u64 *producer_tail,
2917 				   u64 *consumer_head)
2918 {
2919 	struct vmci_queue_header *produce_q_header;
2920 	struct vmci_queue_header *consume_q_header;
2921 	int result;
2922 
2923 	if (!qpair)
2924 		return VMCI_ERROR_INVALID_ARGS;
2925 
2926 	qp_lock(qpair);
2927 	result =
2928 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2929 	if (result == VMCI_SUCCESS)
2930 		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2931 					   producer_tail, consumer_head);
2932 	qp_unlock(qpair);
2933 
2934 	if (result == VMCI_SUCCESS &&
2935 	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2936 	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
2937 		return VMCI_ERROR_INVALID_SIZE;
2938 
2939 	return result;
2940 }
2941 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
2942 
2943 /*
2944  * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2945  * @qpair:      Pointer to the queue pair struct.
2946  * @consumer_tail:      Reference used for storing consumer tail index.
2947  * @producer_head:      Reference used for storing the producer head index.
2948  *
2949  * This is the client interface for getting the current indexes of the
2950  * QPair from the point of view of the caller as the consumer.
2951  */
2952 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2953 				   u64 *consumer_tail,
2954 				   u64 *producer_head)
2955 {
2956 	struct vmci_queue_header *produce_q_header;
2957 	struct vmci_queue_header *consume_q_header;
2958 	int result;
2959 
2960 	if (!qpair)
2961 		return VMCI_ERROR_INVALID_ARGS;
2962 
2963 	qp_lock(qpair);
2964 	result =
2965 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2966 	if (result == VMCI_SUCCESS)
2967 		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2968 					   consumer_tail, producer_head);
2969 	qp_unlock(qpair);
2970 
2971 	if (result == VMCI_SUCCESS &&
2972 	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2973 	     (producer_head && *producer_head >= qpair->consume_q_size)))
2974 		return VMCI_ERROR_INVALID_SIZE;
2975 
2976 	return result;
2977 }
2978 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2979 
2980 /*
2981  * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2982  * @qpair:      Pointer to the queue pair struct.
2983  *
2984  * This is the client interface for getting the amount of free
2985  * space in the QPair from the point of view of the caller as the
2986  * producer, which is the common case.  Returns < 0 on error,
2987  * otherwise the number of free bytes into which data can be enqueued.
2988  */
2989 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2990 {
2991 	struct vmci_queue_header *produce_q_header;
2992 	struct vmci_queue_header *consume_q_header;
2993 	s64 result;
2994 
2995 	if (!qpair)
2996 		return VMCI_ERROR_INVALID_ARGS;
2997 
2998 	qp_lock(qpair);
2999 	result =
3000 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3001 	if (result == VMCI_SUCCESS)
3002 		result = vmci_q_header_free_space(produce_q_header,
3003 						  consume_q_header,
3004 						  qpair->produce_q_size);
3005 	else
3006 		result = 0;
3007 
3008 	qp_unlock(qpair);
3009 
3010 	return result;
3011 }
3012 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
3013 
3014 /*
3015  * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
3016  * @qpair:      Pointer to the queue pair struct.
3017  *
3018  * This is the client interface for getting the amount of free
3019  * space in the QPair from the point of view of the caller as the
3020  * consumer, which is not the common case.  Returns < 0 on error,
3021  * otherwise the number of free bytes into which data can be enqueued.
3022  */
3023 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
3024 {
3025 	struct vmci_queue_header *produce_q_header;
3026 	struct vmci_queue_header *consume_q_header;
3027 	s64 result;
3028 
3029 	if (!qpair)
3030 		return VMCI_ERROR_INVALID_ARGS;
3031 
3032 	qp_lock(qpair);
3033 	result =
3034 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3035 	if (result == VMCI_SUCCESS)
3036 		result = vmci_q_header_free_space(consume_q_header,
3037 						  produce_q_header,
3038 						  qpair->consume_q_size);
3039 	else
3040 		result = 0;
3041 
3042 	qp_unlock(qpair);
3043 
3044 	return result;
3045 }
3046 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
3047 
3048 /*
3049  * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
3050  * producer queue.
3051  * @qpair:      Pointer to the queue pair struct.
3052  *
3053  * This is the client interface for getting the amount of
3054  * enqueued data in the QPair from the point of view of the caller
3055  * as the producer, which is not the common case.  Returns < 0 on
3056  * error, otherwise the number of bytes available to be read.
3057  */
3058 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
3059 {
3060 	struct vmci_queue_header *produce_q_header;
3061 	struct vmci_queue_header *consume_q_header;
3062 	s64 result;
3063 
3064 	if (!qpair)
3065 		return VMCI_ERROR_INVALID_ARGS;
3066 
3067 	qp_lock(qpair);
3068 	result =
3069 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3070 	if (result == VMCI_SUCCESS)
3071 		result = vmci_q_header_buf_ready(produce_q_header,
3072 						 consume_q_header,
3073 						 qpair->produce_q_size);
3074 	else
3075 		result = 0;
3076 
3077 	qp_unlock(qpair);
3078 
3079 	return result;
3080 }
3081 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
3082 
3083 /*
3084  * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
3085  * consumer queue.
3086  * @qpair:      Pointer to the queue pair struct.
3087  *
3088  * This is the client interface for getting the amount of
3089  * enqueued data in the QPair from the point of view of the caller
3090  * as the consumer, which is the normal case.  Returns < 0 on error,
3091  * otherwise the number of bytes available to be read.
3092  */
3093 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
3094 {
3095 	struct vmci_queue_header *produce_q_header;
3096 	struct vmci_queue_header *consume_q_header;
3097 	s64 result;
3098 
3099 	if (!qpair)
3100 		return VMCI_ERROR_INVALID_ARGS;
3101 
3102 	qp_lock(qpair);
3103 	result =
3104 	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3105 	if (result == VMCI_SUCCESS)
3106 		result = vmci_q_header_buf_ready(consume_q_header,
3107 						 produce_q_header,
3108 						 qpair->consume_q_size);
3109 	else
3110 		result = 0;
3111 
3112 	qp_unlock(qpair);
3113 
3114 	return result;
3115 }
3116 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3117 
3118 /*
3119  * vmci_qpair_enqueue() - Throw data on the queue.
3120  * @qpair:      Pointer to the queue pair struct.
3121  * @buf:        Pointer to buffer containing data.
3122  * @buf_size:   Length of buffer.
3123  * @buf_type:   Buffer type (Unused).
3124  *
3125  * This is the client interface for enqueueing data into the queue.
3126  * Returns number of bytes enqueued or < 0 on error.
3127  */
3128 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3129 			   const void *buf,
3130 			   size_t buf_size,
3131 			   int buf_type)
3132 {
3133 	ssize_t result;
3134 
3135 	if (!qpair || !buf)
3136 		return VMCI_ERROR_INVALID_ARGS;
3137 
3138 	qp_lock(qpair);
3139 
3140 	do {
3141 		result = qp_enqueue_locked(qpair->produce_q,
3142 					   qpair->consume_q,
3143 					   qpair->produce_q_size,
3144 					   buf, buf_size,
3145 					   qp_memcpy_to_queue);
3146 
3147 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3148 		    !qp_wait_for_ready_queue(qpair))
3149 			result = VMCI_ERROR_WOULD_BLOCK;
3150 
3151 	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3152 
3153 	qp_unlock(qpair);
3154 
3155 	return result;
3156 }
3157 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
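
/*
 * Illustrative producer-side call (sketch; the buffer is made up,
 * buf_type is unused and passed as 0):
 *
 *	char out[64];
 *	ssize_t n;
 *
 *	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *
 * The return value may be smaller than the buffer size when the
 * queue has less free space than sizeof(out).
 */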

/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
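
/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical
 * receiver draining whatever has arrived.  "buf" and process() are made up
 * for illustration; the return value is the number of bytes removed from
 * the consume queue.
 *
 *	char buf[128];
 *	ssize_t n = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);
 *
 *	if (n > 0)
 *		process(buf, n);
 */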

/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue.  (I.e.,
 * copy data from the queue without updating the head pointer.)
 * Returns number of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);
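
/*
 * Usage sketch (illustrative only, not part of this driver): peeking lets a
 * hypothetical caller inspect a fixed-size header without consuming it, and
 * dequeue the full record only once it has arrived in its entirety.  "struct
 * rec_hdr" and "record" are invented for illustration.
 *
 *	struct rec_hdr hdr;
 *
 *	if (vmci_qpair_peek(qpair, &hdr, sizeof(hdr), 0) == sizeof(hdr) &&
 *	    vmci_qpair_consume_buf_ready(qpair) >= sizeof(hdr) + hdr.len)
 *		vmci_qpair_dequeue(qpair, record, sizeof(hdr) + hdr.len, 0);
 */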

/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to a msghdr whose iov carries the data.
 * @iov_size:   Length of the data, in bytes.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   msg, iov_size,
					   qp_memcpy_to_queue_iov);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
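
/*
 * Usage sketch (illustrative only, not part of this driver): building a
 * msghdr around a kernel kvec before enqueueing.  Note that the
 * iov_iter_kvec() direction argument has changed across kernel versions
 * (WRITE | ITER_KVEC on older trees, ITER_SOURCE on recent ones), so the
 * flag shown here is an assumption about the contemporary API.
 *
 *	struct kvec v = { .iov_base = data, .iov_len = len };
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &v, 1, len);
 *	n = vmci_qpair_enquev(qpair, &msg, len, 0);
 */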

/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to a msghdr whose iov receives the data.
 * @iov_size:   Length of the data, in bytes.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   msg, iov_size,
					   qp_memcpy_from_queue_iov,
					   true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
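
/*
 * Usage sketch (illustrative only, not part of this driver): the receive
 * side mirrors the enqueue sketch above; the same kernel-version caveat
 * about the iov_iter_kvec() direction flag applies (READ | ITER_KVEC on
 * older trees, ITER_DEST on recent ones).
 *
 *	struct kvec v = { .iov_base = buf, .iov_len = buf_len };
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &v, 1, buf_len);
 *	n = vmci_qpair_dequev(qpair, &msg, buf_len, 0);
 */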

/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to a msghdr whose iov receives the data.
 * @iov_size:   Length of the data, in bytes.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue.  (I.e.,
 * copy data from the queue without updating the head pointer.)
 * This function uses IO vectors to handle the work. Returns number
 * of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 struct msghdr *msg,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   msg, iov_size,
					   qp_memcpy_from_queue_iov,
					   false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
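
/*
 * Usage sketch (illustrative only, not part of this driver): peekv is the
 * natural backend for a socket-style recvmsg() with MSG_PEEK, since the
 * head pointer is left untouched and a later plain dequev() returns the
 * same bytes.
 *
 *	if (flags & MSG_PEEK)
 *		n = vmci_qpair_peekv(qpair, msg, len, 0);
 *	else
 *		n = vmci_qpair_dequev(qpair, msg, len, 0);
 */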