// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX to support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                            |   |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair was already in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * a *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa.
 */

/* The kernel-specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
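
/*
 * Minimal sketch of the state layout described above
 * (qpb_example_toggle_mem is a hypothetical helper, not used by the
 * driver): each *_NO_MEM value directly precedes its *_MEM counterpart,
 * so ++/-- on a state toggles its memory aspect.
 */
static inline enum qp_broker_state
qpb_example_toggle_mem(enum qp_broker_state state, bool map_mem)
{
	/* Only the CREATED/ATTACHED/SHUTDOWN pairs have both variants. */
	if (map_mem && (state == VMCIQPB_CREATED_NO_MEM ||
			state == VMCIQPB_ATTACHED_NO_MEM ||
			state == VMCIQPB_SHUTDOWN_NO_MEM))
		return state + 1;	/* *_NO_MEM -> *_MEM */
	if (!map_mem && (state == VMCIQPB_CREATED_MEM ||
			 state == VMCIQPB_ATTACHED_MEM ||
			 state == VMCIQPB_SHUTDOWN_MEM))
		return state - 1;	/* *_MEM -> *_NO_MEM */
	return state;
}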

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};
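
/*
 * Minimal sketch of the orientation rule above (qp_example_host_view is
 * a hypothetical helper, not used by the driver): a non-local host
 * endpoint must swap the guest-oriented sizes to obtain its own view.
 */
static inline void qp_example_host_view(const struct qp_entry *qp,
					u64 *host_produce_size,
					u64 *host_consume_size)
{
	*host_produce_size = qp->consume_size;	/* the guest's consume queue */
	*host_consume_size = qp->produce_size;	/* the guest's produce queue */
}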

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
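
/*
 * Worked example (assuming 4 KiB pages): a queue pair entry with a
 * 16 KiB produce queue and a 4 KiB consume queue yields
 * QPE_NUM_PAGES = 4 + 1 + 2 = 7, i.e. one page per whole or partial
 * PAGE_SIZE of queue data, plus one header page for each queue.
 */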


/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}
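
/*
 * Illustrative memory layout of the single vmalloc() region built by
 * qp_alloc_queue() above (n == num_pages; the data pages themselves are
 * allocated separately, one dma_alloc_coherent() call per page):
 *
 *   [ struct vmci_queue | struct vmci_queue_kern_if | pas[0..n) | vas[0..n) ]
 */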

/*
 * Copies from a given buffer or iovector to a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				  u64 queue_offset,
				  struct iov_iter *from,
				  size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				    const struct vmci_queue *queue,
				    u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u64 *produce_ppns;
	u64 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			  GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			  GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++)
		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;
}
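
/*
 * Worked example for the PPN computation above: with 4 KiB pages
 * (PAGE_SHIFT == 12), a queue page whose DMA address is 0x12345000 is
 * recorded as PPN 0x12345.
 */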

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	if (vmci_use_ppn64()) {
		memcpy(call_buf, ppn_set->produce_ppns,
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns));
		memcpy(call_buf +
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns),
		       ppn_set->consume_ppns,
		       ppn_set->num_consume_pages *
		       sizeof(*ppn_set->consume_ppns));
	} else {
		int i;
		u32 *ppns = (u32 *) call_buf;

		for (i = 0; i < ppn_set->num_produce_pages; i++)
			ppns[i] = (u32) ppn_set->produce_ppns[i];

		ppns = &ppns[ppn_set->num_produce_pages];

		for (i = 0; i < ppn_set->num_consume_pages; i++)
			ppns[i] = (u32) ppn_set->consume_ppns[i];
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface.  This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
		return NULL;

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}

/*
 * Acquire the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}
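
/*
 * Typical usage of the two helpers above (illustrative): since both
 * queues of a host-side pair share a single mutex, either queue may be
 * passed for locking, e.g.:
 *
 *	qp_acquire_queue_mutex(produce_q);
 *	... access produce_q->q_header and consume_q->q_header ...
 *	qp_release_queue_mutex(produce_q);
 *
 * On the guest side both calls are no-ops.
 */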

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			retval);
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			retval);
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differ in
	 * that we either get a single UVA or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct.  Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}
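
/*
 * Note on the unmap above (a reading of the code, not from the original
 * comments): vunmap() needs the base of the two-page mapping created in
 * qp_host_map_queues(), and an endpoint that swapped its produce and
 * consume queues may hold the headers in either order, hence the
 * lower-address check.
 */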

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles.  Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	size_t ppn_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * ppn_size;
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate.  We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet.  It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here, then we either failed to notify a local queuepair
	 * (which is tolerated, see above), or we succeeded in all cases.
	 * Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS-dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach.  We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones.  The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create.  We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create.  For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM.  Returns the payload size of the datagram enqueued on
 * success, or an error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
	 * the number of pending events from the hypervisor to a given VM;
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}
1485 
1486 /*
1487  * The second endpoint issuing a queue pair allocation will attach to
1488  * the queue pair registered with the queue pair broker.
1489  *
1490  * If the attacher is a guest, it will associate a VMX virtual address
1491  * range with the queue pair as specified by the page_store. At this
1492  * point, the already attach host endpoint may start using the queue
1493  * pair, and an attach event is sent to it. For compatibility with
1494  * older VMX'en, that used a separate step to set the VMX virtual
1495  * address range, the virtual address range can be registered later
1496  * using vmci_qp_broker_set_page_store. In that case, a page_store of
1497  * NULL should be used, and the attach event will be generated once
1498  * the actual page store has been set.
1499  *
1500  * If the attacher is the host, a page_store of NULL should be used as
1501  * well, since the page store information is already set by the guest.
1502  *
1503  * For new VMX and host callers, the queue pair will be moved to the
1504  * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
1505  * moved to the VMCOQPB_ATTACHED_NO_MEM state.
1506  */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */

		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */

		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */

		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attaches to a queue pair, it will supply
		 * the backing memory.  If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call.  In all cases, the
		 * initially created queue pair must not have any
		 * memory associated with it already.
		 */

		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */

			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */

		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}

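/*
 * Illustrative walk-through (a sketch, not executable driver code): a
 * new-style guest VMX attaching to a host-created queue pair drives
 * the broker through the following states:
 *
 *	host:  vmci_qp_broker_alloc(h, ...)             -> CREATED_NO_MEM
 *	guest: vmci_qp_broker_alloc(h, ..., page_store) -> ATTACHED_MEM,
 *	                                          attach event to creator
 *
 * An old-style VMX attaches with a NULL page_store instead, landing in
 * ATTACHED_NO_MEM until vmci_qp_broker_set_page_store() supplies the
 * UVAs and triggers the attach event.
 */
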
/*
 * Queue pair alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
		    qp_broker_create(handle, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
		    qp_broker_attach(entry, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
		    !(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
			    produce_size, consume_size, NULL, context,
			    wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
			 */

			*produce_q = entry->consume_q;
			*consume_q = entry->produce_q;
		} else {
			*produce_q = entry->produce_q;
			*consume_q = entry->consume_q;
		}

		*handle = vmci_resource_handle(&entry->resource);
	} else {
		*handle = VMCI_INVALID_HANDLE;
		pr_devel("queue pair broker failed to alloc (result=%d)\n",
			 result);
	}
	vmci_ctx_put(context);
	return result;
}

/*
 * Allocates a VMCI queue_pair. Only checks validity of input
 * arguments. The real work is done in the host or guest
 * specific function.
 */
int vmci_qp_alloc(struct vmci_handle *handle,
		  struct vmci_queue **produce_q,
		  u64 produce_size,
		  struct vmci_queue **consume_q,
		  u64 consume_size,
		  u32 peer,
		  u32 flags,
		  u32 priv_flags,
		  bool guest_endpoint,
		  vmci_event_release_cb wakeup_cb,
		  void *client_data)
{
	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return VMCI_ERROR_INVALID_ARGS;

	if (guest_endpoint) {
		return qp_alloc_guest_work(handle, produce_q,
					   produce_size, consume_q,
					   consume_size, peer,
					   flags, priv_flags);
	} else {
		return qp_alloc_host_work(handle, produce_q,
					  produce_size, consume_q,
					  consume_size, peer, flags,
					  priv_flags, wakeup_cb, client_data);
	}
}

/*
 * This function implements the host kernel API for detaching from
 * a queue pair.
 */
static int qp_detatch_host_work(struct vmci_handle handle)
{
	int result;
	struct vmci_ctx *context;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);

	result = vmci_qp_broker_detach(handle, context);

	vmci_ctx_put(context);
	return result;
}

/*
 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
 * Real work is done in the host or guest specific function.
 */
static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
{
	if (vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	if (guest_endpoint)
		return qp_detatch_guest_work(handle);
	else
		return qp_detatch_host_work(handle);
}

/*
 * Returns the entry from the head of the list. Assumes that the list is
 * locked.
 */
static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
{
	if (!list_empty(&qp_list->head)) {
		struct qp_entry *entry =
		    list_first_entry(&qp_list->head, struct qp_entry,
				     list_item);
		return entry;
	}

	return NULL;
}

void vmci_qp_broker_exit(void)
{
	struct qp_entry *entry;
	struct qp_broker_entry *be;

	mutex_lock(&qp_broker_list.mutex);

	while ((entry = qp_list_get_head(&qp_broker_list))) {
		be = (struct qp_broker_entry *)entry;

		qp_list_remove_entry(&qp_broker_list, entry);
		kfree(be);
	}

	mutex_unlock(&qp_broker_list.mutex);
}

/*
 * Requests that a queue pair be allocated with the VMCI queue
 * pair broker. Allocates a queue pair entry if one does not
 * exist. Attaches to one if it exists, and retrieves the page
 * files backing that queue_pair.  Assumes that the queue pair
 * broker lock is held.
 */
int vmci_qp_broker_alloc(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u32 priv_flags,
			 u64 produce_size,
			 u64 consume_size,
			 struct vmci_qp_page_store *page_store,
			 struct vmci_ctx *context)
{
	return qp_broker_alloc(handle, peer, flags, priv_flags,
			       produce_size, consume_size,
			       page_store, context, NULL, NULL, NULL, NULL);
}

/*
 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
 * step to add the UVAs of the VMX mapping of the queue pair. This function
 * provides backwards compatibility with such VMX'en, and takes care of
 * registering the page store for a queue pair previously allocated by the
 * VMX during create or attach. This function will move the queue pair state
 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
 * attached state with memory, the queue pair is ready to be used by the
 * host peer, and an attached event will be generated.
 *
 * Assumes that the queue pair broker lock is held.
 *
 * This function is only used by the hosted platform, since there is no
 * issue with backwards compatibility for vmkernel.
 */
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
				  u64 produce_uva,
				  u64 consume_uva,
				  struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	int result;
	const u32 context_id = vmci_ctx_get_id(context);

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * We only support guest to host queue pairs, so the VMX must
	 * supply UVAs for the mapped page files.
	 */

	if (produce_uva == 0 || consume_uva == 0)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	/*
	 * If I'm the owner then I can set the page store.
	 *
	 * Or, if a host created the queue_pair and I'm the attached peer
	 * then I can set the page store.
	 */
	if (entry->create_id != context_id &&
	    (entry->create_id != VMCI_HOST_CONTEXT_ID ||
	     entry->attach_id != context_id)) {
		result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
		goto out;
	}

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_ATTACHED_NO_MEM) {
		result = VMCI_ERROR_UNAVAILABLE;
		goto out;
	}

	result = qp_host_get_user_memory(produce_uva, consume_uva,
					 entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS)
		goto out;

	result = qp_host_map_queues(entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS) {
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);
		goto out;
	}

	if (entry->state == VMCIQPB_CREATED_NO_MEM)
		entry->state = VMCIQPB_CREATED_MEM;
	else
		entry->state = VMCIQPB_ATTACHED_MEM;

	entry->vmci_page_files = true;

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, handle, context_id, entry->create_id);
		if (result < VMCI_SUCCESS) {
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
		}
	}

	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
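
/*
 * Illustrative old-style flow (a sketch with assumed UVA values, not
 * driver code): an old-style VMX first allocates without memory and
 * registers the page store afterwards:
 *
 *	vmci_qp_broker_alloc(h, peer, flags, priv, psize, csize,
 *			     NULL, guest_ctx);	      // *_NO_MEM state
 *	vmci_qp_broker_set_page_store(h, produce_uva, consume_uva,
 *				      guest_ctx);     // -> *_MEM state
 *
 * where produce_uva/consume_uva are the VMX user-space addresses of
 * the mapped page files backing the two queues.
 */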

/*
 * Resets saved queue headers for the given QP broker
 * entry. Should be used when guest memory becomes available
 * again, or the guest detaches.
 */
static void qp_reset_saved_headers(struct qp_broker_entry *entry)
{
	entry->produce_q->saved_header = NULL;
	entry->consume_q->saved_header = NULL;
}

/*
 * The main entry point for detaching from a queue pair registered with the
 * queue pair broker. If more than one endpoint is attached to the queue
 * pair, the first endpoint will mainly decrement a reference count and
 * generate a notification to its peer. The last endpoint will clean up
 * the queue pair state registered with the broker.
 *
 * When a guest endpoint detaches, it will unmap and unregister the guest
 * memory backing the queue pair. If the host is still attached, it will
 * no longer be able to access the queue pair content.
 *
 * If the queue pair is already in a state where there is no memory
 * registered for the queue pair (any *_NO_MEM state), it will transition to
 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
 * endpoint is the first of two endpoints to detach. If the host endpoint is
 * the first out of two to detach, the queue pair will move to the
 * VMCIQPB_SHUTDOWN_MEM state.
 */
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	u32 peer_id;
	bool is_local = false;
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id == entry->create_id) {
		peer_id = entry->attach_id;
		entry->create_id = VMCI_INVALID_ID;
	} else {
		peer_id = entry->create_id;
		entry->attach_id = VMCI_INVALID_ID;
	}
	entry->qp.ref_count--;

	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		bool headers_mapped;

		/*
		 * Pre NOVMVM vmx'en may detach from a queue pair
		 * before setting the page store, and in that case
		 * there is no user memory to detach from. Also, more
		 * recent VMX'en may detach from a queue pair in the
		 * quiesced state.
		 */

		qp_acquire_queue_mutex(entry->produce_q);
		headers_mapped = entry->produce_q->q_header ||
		    entry->consume_q->q_header;
		if (QPBROKERSTATE_HAS_MEM(entry)) {
			result =
			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
						 entry->produce_q,
						 entry->consume_q);
			if (result < VMCI_SUCCESS)
				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
					handle.context, handle.resource,
					result);

			qp_host_unregister_user_memory(entry->produce_q,
						       entry->consume_q);

		}

		if (!headers_mapped)
			qp_reset_saved_headers(entry);

		qp_release_queue_mutex(entry->produce_q);

		if (!headers_mapped && entry->wakeup_cb)
			entry->wakeup_cb(entry->client_data);

	} else {
		if (entry->wakeup_cb) {
			entry->wakeup_cb = NULL;
			entry->client_data = NULL;
		}
	}

	if (entry->qp.ref_count == 0) {
		qp_list_remove_entry(&qp_broker_list, &entry->qp);

		if (is_local)
			kfree(entry->local_mem);

		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
		/* Unlink from resource hash table and free callback */
		vmci_resource_remove(&entry->resource);

		kfree(entry);

		vmci_ctx_qp_destroy(context, handle);
	} else {
		qp_notify_peer(false, handle, context_id, peer_id);
		if (context_id == VMCI_HOST_CONTEXT_ID &&
		    QPBROKERSTATE_HAS_MEM(entry)) {
			entry->state = VMCIQPB_SHUTDOWN_MEM;
		} else {
			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
		}

		if (!is_local)
			vmci_ctx_qp_destroy(context, handle);

	}
	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
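
/*
 * Illustrative detach sequence (a sketch, not driver code) for an
 * attached guest/host pair starting in VMCIQPB_ATTACHED_MEM:
 *
 *	guest: vmci_qp_broker_detach(h, guest_ctx)
 *		-> guest memory unregistered, peer notified,
 *		   state = VMCIQPB_SHUTDOWN_NO_MEM, ref_count = 1
 *	host:  vmci_qp_broker_detach(h, host_ctx)
 *		-> ref_count = 0, queues freed, entry removed
 *
 * Had the host detached first, the intermediate state would have been
 * VMCIQPB_SHUTDOWN_MEM, since the guest memory remains registered.
 */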

/*
 * Establishes the necessary mappings for a queue pair given a
 * reference to the queue pair guest memory. This is usually
 * called when a guest is unquiesced and the VMX is allowed to
 * map guest memory once again.
 */
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context,
		       u64 guest_mem)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	result = VMCI_SUCCESS;

	if (context_id != VMCI_HOST_CONTEXT_ID &&
	    !QPBROKERSTATE_HAS_MEM(entry)) {
		struct vmci_qp_page_store page_store;

		page_store.pages = guest_mem;
		page_store.len = QPE_NUM_PAGES(entry->qp);

		qp_acquire_queue_mutex(entry->produce_q);
		qp_reset_saved_headers(entry);
		result =
		    qp_host_register_user_memory(&page_store,
						 entry->produce_q,
						 entry->consume_q);
		qp_release_queue_mutex(entry->produce_q);
		if (result == VMCI_SUCCESS) {
			/* Move state from *_NO_MEM to *_MEM */

			entry->state++;

			if (entry->wakeup_cb)
				entry->wakeup_cb(entry->client_data);
		}
	}

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Saves a snapshot of the queue headers for the given QP broker
 * entry. Should be used when guest memory is unmapped.
 * Results:
 * VMCI_SUCCESS on success, appropriate error code if guest memory
 * can't be accessed.
 */
static int qp_save_headers(struct qp_broker_entry *entry)
{
	int result;

	if (entry->produce_q->saved_header != NULL &&
	    entry->consume_q->saved_header != NULL) {
		/*
		 *  If the headers have already been saved, we don't need to do
		 *  it again, and we don't want to map in the headers
		 *  unnecessarily.
		 */

		return VMCI_SUCCESS;
	}

	if (NULL == entry->produce_q->q_header ||
	    NULL == entry->consume_q->q_header) {
		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
		if (result < VMCI_SUCCESS)
			return result;
	}

	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
	       sizeof(entry->saved_produce_q));
	entry->produce_q->saved_header = &entry->saved_produce_q;
	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
	       sizeof(entry->saved_consume_q));
	entry->consume_q->saved_header = &entry->saved_consume_q;

	return VMCI_SUCCESS;
}

/*
 * Removes all references to the guest memory of a given queue pair, and
 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
 * called when a VM is being quiesced, where access to guest memory should
 * be avoided.
 */
int vmci_qp_broker_unmap(struct vmci_handle handle,
			 struct vmci_ctx *context,
			 u32 gid)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID &&
	    QPBROKERSTATE_HAS_MEM(entry)) {
		qp_acquire_queue_mutex(entry->produce_q);
		result = qp_save_headers(entry);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
				handle.context, handle.resource, result);

		qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

		/*
		 * On hosted, when we unmap queue pairs, the VMX will also
		 * unmap the guest memory, so we invalidate the previously
		 * registered memory. If the queue pair is mapped again at a
		 * later point in time, we will need to reregister the user
		 * memory with a possibly new user VA.
		 */
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);

		/*
		 * Move state from *_MEM to *_NO_MEM.
		 */
		entry->state--;

		qp_release_queue_mutex(entry->produce_q);
	}

	result = VMCI_SUCCESS;

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
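
/*
 * Illustrative quiesce/unquiesce cycle (a sketch, not driver code):
 *
 *	vmci_qp_broker_unmap(h, guest_ctx, gid);     // headers saved,
 *	                                             // *_MEM -> *_NO_MEM
 *	...                                  // VM quiesced; readers see
 *	...                                  // only the saved headers
 *	vmci_qp_broker_map(h, guest_ctx, guest_mem); // *_NO_MEM -> *_MEM,
 *	                                             // blocked waiters woken
 *
 * The state++/state-- arithmetic in map/unmap relies on each *_NO_MEM
 * value being immediately followed by its *_MEM counterpart in the
 * VMCIQPB_* state enum.
 */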

/*
 * Destroys all guest queue pair endpoints. If active guest queue
 * pairs still exist, hypercalls to attempt detach from these
 * queue pairs will be made. Any failure to detach is silently
 * ignored.
 */
void vmci_qp_guest_endpoints_exit(void)
{
	struct qp_entry *entry;
	struct qp_guest_endpoint *ep;

	mutex_lock(&qp_guest_endpoints.mutex);

	while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
		ep = (struct qp_guest_endpoint *)entry;

		/* Don't make a hypercall for local queue_pairs. */
		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
			qp_detatch_hypercall(entry->handle);

		/* We cannot fail the exit, so let's reset ref_count. */
		entry->ref_count = 0;
		qp_list_remove_entry(&qp_guest_endpoints, entry);

		qp_guest_endpoint_destroy(ep);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);
}

/*
 * Helper routine that will lock the queue pair before subsequent
 * operations.
 * Note: Non-blocking on the host side is currently only implemented in ESX.
 * Since non-blocking isn't yet implemented on the host personality, we
 * have no reason to acquire a spin lock.  So to avoid the use of an
 * unnecessary lock, only acquire the mutex if we can block.
 */
static void qp_lock(const struct vmci_qp *qpair)
{
	qp_acquire_queue_mutex(qpair->produce_q);
}

/*
 * Helper routine that unlocks the queue pair after calling
 * qp_lock.
 */
static void qp_unlock(const struct vmci_qp *qpair)
{
	qp_release_queue_mutex(qpair->produce_q);
}

/*
 * The queue headers may not be mapped at all times. If a queue is
 * currently not mapped, an attempt will be made to map it.
 */
static int qp_map_queue_headers(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	int result;

	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
		result = qp_host_map_queues(produce_q, consume_q);
		if (result < VMCI_SUCCESS)
			return (produce_q->saved_header &&
				consume_q->saved_header) ?
			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
	}

	return VMCI_SUCCESS;
}

/*
 * Helper routine that will retrieve the produce and consume
 * headers of a given queue pair. If the guest memory of the
 * queue pair is currently not available, the saved queue headers
 * will be returned, if these are available.
 */
static int qp_get_queue_headers(const struct vmci_qp *qpair,
				struct vmci_queue_header **produce_q_header,
				struct vmci_queue_header **consume_q_header)
{
	int result;

	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
	if (result == VMCI_SUCCESS) {
		*produce_q_header = qpair->produce_q->q_header;
		*consume_q_header = qpair->consume_q->q_header;
	} else if (qpair->produce_q->saved_header &&
		   qpair->consume_q->saved_header) {
		*produce_q_header = qpair->produce_q->saved_header;
		*consume_q_header = qpair->consume_q->saved_header;
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Callback from VMCI queue pair broker indicating that a queue
 * pair that was previously not ready, now either is ready or
 * gone forever.
 */
static int qp_wakeup_cb(void *client_data)
{
	struct vmci_qp *qpair = (struct vmci_qp *)client_data;

	qp_lock(qpair);
	while (qpair->blocked > 0) {
		qpair->blocked--;
		qpair->generation++;
		wake_up(&qpair->event);
	}
	qp_unlock(qpair);

	return VMCI_SUCCESS;
}

/*
 * Makes the calling thread wait for the queue pair to become
 * ready for host side access.  Returns true when thread is
 * woken up after queue pair state change, false otherwise.
 */
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
	unsigned int generation;

	qpair->blocked++;
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);

	return true;
}
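
/*
 * The generation counter above implements the classic lost-wakeup
 * guard: the waiter samples qpair->generation while still holding the
 * queue mutex, so if qp_wakeup_cb() bumps the counter between
 * qp_unlock() and wait_event(), the wait_event() condition is already
 * true and the thread does not sleep. A minimal sketch of the pattern:
 *
 *	lock();
 *	gen = obj->generation;		// sample under the lock
 *	unlock();
 *	wait_event(obj->event, gen != obj->generation);
 *	lock();				// re-acquire before touching state
 */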

/*
 * Enqueues a given buffer to the produce queue using the provided
 * function. As many bytes as possible (space available in the queue)
 * are enqueued.  Assumes the queue->mutex has been acquired.  Returns
 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
 * an error occurred when accessing the buffer,
 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
 * available.  Otherwise, the number of bytes written to the queue is
 * returned.  Updates the tail pointer of the produce queue.
 */
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 produce_q_size,
				 struct iov_iter *from)
{
	s64 free_space;
	u64 tail;
	size_t buf_size = iov_iter_count(from);
	size_t written;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	free_space = vmci_q_header_free_space(produce_q->q_header,
					      consume_q->q_header,
					      produce_q_size);
	if (free_space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;

	if (free_space < VMCI_SUCCESS)
		return (ssize_t) free_space;

	written = (size_t) (free_space > buf_size ? buf_size : free_space);
	tail = vmci_q_header_producer_tail(produce_q->q_header);
	if (likely(tail + written < produce_q_size)) {
		result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
	} else {
		/* Tail pointer wraps around. */

		const size_t tmp = (size_t) (produce_q_size - tail);

		result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_to_queue_iter(produce_q, 0, from,
						 written - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	vmci_q_header_add_producer_tail(produce_q->q_header, written,
					produce_q_size);
	return written;
}
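
/*
 * Worked example of the wrap-around copy above (illustrative numbers):
 * with produce_q_size = 8, tail = 6 and written = 4, the condition
 * tail + written < produce_q_size fails, so the data is copied in two
 * pieces: tmp = 8 - 6 = 2 bytes at offset 6, then written - tmp = 2
 * bytes at offset 0. The producer tail then advances from 6 to
 * (6 + 4) mod 8 = 2.
 */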

/*
 * Dequeues data (if available) from the given consume queue. Writes data
 * to the user provided buffer using the provided function.
 * Assumes the queue->mutex has been acquired.
 * Results:
 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
 * (as defined by the queue size).
 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
 * Otherwise the number of bytes dequeued is returned.
 * Side effects:
 * Updates the head pointer of the consume queue.
 */
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
				 struct iov_iter *to,
				 bool update_consumer)
{
	size_t buf_size = iov_iter_count(to);
	s64 buf_ready;
	u64 head;
	size_t read;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
					    produce_q->q_header,
					    consume_q_size);
	if (buf_ready == 0)
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	if (buf_ready < VMCI_SUCCESS)
		return (ssize_t) buf_ready;

	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
	} else {
		/* Head pointer wraps around. */

		const size_t tmp = (size_t) (consume_q_size - head);

		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
						   read - tmp);

	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}
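
/*
 * Note that update_consumer is the only difference between a dequeue
 * and a peek: vmci_qpair_dequeue() passes true so the consumer head
 * advances, while vmci_qpair_peek() passes false and leaves the head
 * untouched, so the same bytes are returned again on the next read.
 */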

/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:      Pointer for the new vmci_qp struct.
 * @handle:     Handle to track the resource.
 * @produce_qsize:      Desired size of the producer queue.
 * @consume_qsize:      Desired size of the consumer queue.
 * @peer:       ContextID of the peer.
 * @flags:      VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying
 * queue.  If an error occurs allocating the memory for the
 * vmci_qp structure no attempt is made to attach.  If an
 * error occurs attaching, then the structure is freed.
 */
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

	/*
	 * Restrict the size of a queuepair.  The device already
	 * enforces a limit on the total amount of memory that can be
	 * allocated to queuepairs for a guest.  However, we try to
	 * allocate this memory before we make the queuepair
	 * allocation hypercall.  On Linux, we allocate each page
	 * separately, which means rather than fail, the guest will
	 * thrash while it tries to allocate, and will become
	 * increasingly unresponsive to the point where it appears to
	 * be hung.  So we place a limit on the size of an individual
	 * queuepair here, and leave the device to enforce the
	 * restriction on total queuepair memory.  (Note that this
	 * doesn't prevent all cases; a user with only this much
	 * physical memory could still get into trouble.)  The error
	 * used by the device is NO_RESOURCES, so use that here too.
	 */

	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;

	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
		pr_devel("NONBLOCK OR PINNED set");
		return VMCI_ERROR_INVALID_ARGS;
	}

	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
	if (!my_qpair)
		return VMCI_ERROR_NO_MEM;

	my_qpair->produce_q_size = produce_qsize;
	my_qpair->consume_q_size = consume_qsize;
	my_qpair->peer = peer;
	my_qpair->flags = flags;
	my_qpair->priv_flags = priv_flags;

	wakeup_cb = NULL;
	client_data = NULL;

	if (VMCI_ROUTE_AS_HOST == route) {
		my_qpair->guest_endpoint = false;
		if (!(flags & VMCI_QPFLAG_LOCAL)) {
			my_qpair->blocked = 0;
			my_qpair->generation = 0;
			init_waitqueue_head(&my_qpair->event);
			wakeup_cb = qp_wakeup_cb;
			client_data = (void *)my_qpair;
		}
	} else {
		my_qpair->guest_endpoint = true;
	}

	retval = vmci_qp_alloc(handle,
			       &my_qpair->produce_q,
			       my_qpair->produce_q_size,
			       &my_qpair->consume_q,
			       my_qpair->consume_q_size,
			       my_qpair->peer,
			       my_qpair->flags,
			       my_qpair->priv_flags,
			       my_qpair->guest_endpoint,
			       wakeup_cb, client_data);

	if (retval < VMCI_SUCCESS) {
		kfree(my_qpair);
		return retval;
	}

	*qpair = my_qpair;
	my_qpair->handle = *handle;

	return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
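
/*
 * Minimal client usage sketch (peer_cid and the queue sizes are
 * placeholders, error handling trimmed):
 *
 *	struct vmci_qp *qpair = NULL;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
 *			      peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *	...
 *	vmci_qpair_detach(&qpair);
 */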

/*
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.
 * Note that this routine will free the memory allocated for the
 * vmci_qp structure too.
 */
int vmci_qpair_detach(struct vmci_qp **qpair)
{
	int result;
	struct vmci_qp *old_qpair;

	if (!qpair || !(*qpair))
		return VMCI_ERROR_INVALID_ARGS;

	old_qpair = *qpair;
	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);

	/*
	 * The guest can fail to detach for a number of reasons, and
	 * if it does so, it will cleanup the entry (if there is one).
	 * The host can fail too, but it won't cleanup the entry
	 * immediately, it will do that later when the context is
	 * freed.  Either way, we need to release the qpair struct
	 * here; there isn't much the caller can do, and we don't want
	 * to leak.
	 */

	memset(old_qpair, 0, sizeof(*old_qpair));
	old_qpair->handle = VMCI_INVALID_HANDLE;
	old_qpair->peer = VMCI_INVALID_ID;
	kfree(old_qpair);
	*qpair = NULL;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);

/*
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair:      Pointer to the queue pair struct.
 * @producer_tail:      Reference used for storing producer tail index.
 * @consumer_head:      Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the producer.
 */
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
				   u64 *producer_tail,
				   u64 *consumer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
					   producer_tail, consumer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);

/*
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair:      Pointer to the queue pair struct.
 * @consumer_tail:      Reference used for storing consumer tail index.
 * @producer_head:      Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the consumer.
 */
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
				   u64 *consumer_tail,
				   u64 *producer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
					   consumer_tail, producer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
	     (producer_head && *producer_head >= qpair->consume_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);

/*
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the producer, which is the common case.  Returns < 0 on error,
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
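
/*
 * Worked example of the free-space arithmetic (illustrative numbers):
 * vmci_q_header_free_space() reserves one byte so that head == tail
 * always means "empty". With produce_q_size = 16, producer tail = 10
 * and consumer head = 4, the free space is 16 - (10 - 4) - 1 = 9
 * bytes; a full queue therefore holds at most produce_q_size - 1
 * bytes of data.
 */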

/*
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the consumer, which is not the common case.  Returns < 0 on error,
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(consume_q_header,
						  produce_q_header,
						  qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);

/*
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
 * producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the producer, which is not the common case.  Returns < 0 on
 * error, otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(produce_q_header,
						 consume_q_header,
						 qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);

/*
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
 * consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the consumer, which is the normal case.  Returns < 0 on
 * error, otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(consume_q_header,
						 produce_q_header,
						 qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);

/*
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer containing data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns number of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
			   const void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter from;
	struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&from, WRITE, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &from);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
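
/*
 * Typical producer-side usage sketch (illustrative; msg, msg_len and
 * qpair are assumed to exist, error handling trimmed):
 *
 *	ssize_t n = vmci_qpair_enqueue(qpair, msg, msg_len, 0);
 *	if (n == VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *		;	// queue full: wait for the peer to drain it
 *	else if (n < 0)
 *		;	// hard error
 *	else if (n < msg_len)
 *		;	// partial write: retry with msg + n, msg_len - n
 */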

/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);

/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue.  (I.e.,
 * copy data from the queue without updating the head pointer.)
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);

/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to msghdr whose IO vectors contain the data.
 * @iov_size:   Length of the IO vectors.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &msg->msg_iter);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);

/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to msghdr whose IO vectors receive the data.
 * @iov_size:   Length of the IO vectors.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
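
/*
 * Scatter/gather usage sketch (illustrative; the kvec array and
 * msghdr setup below are assumptions for the example, not fixed API
 * requirements):
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, hdr_len + body_len);
 *	vmci_qpair_enquev(qpair, &msg, hdr_len + body_len, 0);
 *
 * The same msghdr shape, built with READ, feeds vmci_qpair_dequev()
 * and vmci_qpair_peekv() on the consumer side.
 */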

/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to msghdr whose IO vectors receive the data.
 * @iov_size:   Length of the IO vectors.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue.  (I.e.,
 * copy data from the queue without updating the head pointer.)
 * This function uses IO vectors to handle the work. Returns number
 * of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 struct msghdr *msg,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);
	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
3262