1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2 /*
3  * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
4  *
5  * Copyright (C) 2007-2016 Oracle Corporation
6  */
7 
8 #include <linux/device.h>
9 #include <linux/io.h>
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/sizes.h>
13 #include <linux/slab.h>
14 #include <linux/vbox_err.h>
15 #include <linux/vbox_utils.h>
16 #include <linux/vmalloc.h>
17 #include "vboxguest_core.h"
18 #include "vboxguest_version.h"
19 
20 /* Get the pointer to the first HGCM parameter. */
21 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
22 	((struct vmmdev_hgcm_function_parameter *)( \
23 		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
24 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
25 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
26 	((struct vmmdev_hgcm_function_parameter32 *)( \
27 		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
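/*
 * Layout note with a hedged sketch (not part of the driver): both macros
 * assume the HGCM parameter array sits directly behind the fixed-size
 * struct vbg_ioctl_hgcm_call header in one contiguous buffer, so a call
 * with N parameters occupies
 *   sizeof(struct vbg_ioctl_hgcm_call) + N * sizeof(parameter struct)
 * bytes.  Illustrative use:
 *
 *   struct vbg_ioctl_hgcm_call *call = buf;
 *   struct vmmdev_hgcm_function_parameter *parm =
 *           VBG_IOCTL_HGCM_CALL_PARMS(call);
 *
 *   parm[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;  // first parameter slot
 */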
28 
29 #define GUEST_MAPPINGS_TRIES	5
30 
31 #define VBG_KERNEL_REQUEST \
32 	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
33 	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
34 
35 /**
36  * Reserves memory in which the VMM can relocate any guest mappings
37  * that are floating around.
38  *
39  * This operation is a little bit tricky since the VMM might not accept
40  * just any address because of address clashes between the three contexts
41  * it operates in, so we try several times.
42  *
43  * Failure to reserve the guest mappings is ignored.
44  *
45  * @gdev:		The Guest extension device.
46  */
47 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
48 {
49 	struct vmmdev_hypervisorinfo *req;
50 	void *guest_mappings[GUEST_MAPPINGS_TRIES];
51 	struct page **pages = NULL;
52 	u32 size, hypervisor_size;
53 	int i, rc;
54 
55 	/* Query the required space. */
56 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
57 			    VBG_KERNEL_REQUEST);
58 	if (!req)
59 		return;
60 
61 	req->hypervisor_start = 0;
62 	req->hypervisor_size = 0;
63 	rc = vbg_req_perform(gdev, req);
64 	if (rc < 0)
65 		goto out;
66 
67 	/*
68 	 * The VMM will report back if there is nothing it wants to map, for
69 	 * instance in VT-x and AMD-V mode.
70 	 */
71 	if (req->hypervisor_size == 0)
72 		goto out;
73 
74 	hypervisor_size = req->hypervisor_size;
75 	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
76 	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
77 
78 	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
79 	if (!pages)
80 		goto out;
81 
82 	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
83 	if (!gdev->guest_mappings_dummy_page)
84 		goto out;
85 
86 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
87 		pages[i] = gdev->guest_mappings_dummy_page;
88 
89 	/*
90 	 * Try several times, the VMM might not accept some addresses because
91 	 * of address clashes between the three contexts.
92 	 */
93 	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
94 		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
95 					 VM_MAP, PAGE_KERNEL_RO);
96 		if (!guest_mappings[i])
97 			break;
98 
99 		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
100 		req->header.rc = VERR_INTERNAL_ERROR;
101 		req->hypervisor_size = hypervisor_size;
102 		req->hypervisor_start =
103 			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
104 
105 		rc = vbg_req_perform(gdev, req);
106 		if (rc >= 0) {
107 			gdev->guest_mappings = guest_mappings[i];
108 			break;
109 		}
110 	}
111 
112 	/* Free vmaps from failed attempts. */
113 	while (--i >= 0)
114 		vunmap(guest_mappings[i]);
115 
116 	/* On failure, free the dummy page backing the vmap. */
117 	if (!gdev->guest_mappings) {
118 		__free_page(gdev->guest_mappings_dummy_page);
119 		gdev->guest_mappings_dummy_page = NULL;
120 	}
121 
122 out:
123 	vbg_req_free(req, sizeof(*req));
124 	kfree(pages);
125 }
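/*
 * Worked example of the 4 MiB alignment trick above (illustrative numbers
 * only): if the host reports hypervisor_size = 6 MiB, the vmap covers
 * PAGE_ALIGN(6 MiB) + SZ_4M = 10 MiB of dummy pages.  PTR_ALIGN() then
 * rounds the returned address up by at most SZ_4M - PAGE_SIZE, so a
 * 4 MiB aligned window of hypervisor_size bytes always fits inside the
 * mapping:
 *
 *   void *map   = guest_mappings[i];        // page aligned, from vmap()
 *   void *start = PTR_ALIGN(map, SZ_4M);    // what we report to the host
 *   // start + hypervisor_size <= map + size holds by construction
 */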
126 
127 /**
128  * Undo what vbg_guest_mappings_init did.
129  *
130  * @gdev:		The Guest extension device.
131  */
132 static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
133 {
134 	struct vmmdev_hypervisorinfo *req;
135 	int rc;
136 
137 	if (!gdev->guest_mappings)
138 		return;
139 
140 	/*
141 	 * Tell the host that we're going to free the memory we reserved for
142 	 * it, then free it up. (Leak the memory if anything goes wrong here.)
143 	 */
144 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
145 			    VBG_KERNEL_REQUEST);
146 	if (!req)
147 		return;
148 
149 	req->hypervisor_start = 0;
150 	req->hypervisor_size = 0;
151 
152 	rc = vbg_req_perform(gdev, req);
153 
154 	vbg_req_free(req, sizeof(*req));
155 
156 	if (rc < 0) {
157 		vbg_err("%s error: %d\n", __func__, rc);
158 		return;
159 	}
160 
161 	vunmap(gdev->guest_mappings);
162 	gdev->guest_mappings = NULL;
163 
164 	__free_page(gdev->guest_mappings_dummy_page);
165 	gdev->guest_mappings_dummy_page = NULL;
166 }
167 
168 /**
169  * Report the guest information to the host.
170  * Return: 0 or negative errno value.
171  * @gdev:		The Guest extension device.
172  */
173 static int vbg_report_guest_info(struct vbg_dev *gdev)
174 {
175 	/*
176 	 * Allocate and fill in the two guest info reports.
177 	 */
178 	struct vmmdev_guest_info *req1 = NULL;
179 	struct vmmdev_guest_info2 *req2 = NULL;
180 	int rc, ret = -ENOMEM;
181 
182 	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
183 			     VBG_KERNEL_REQUEST);
184 	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
185 			     VBG_KERNEL_REQUEST);
186 	if (!req1 || !req2)
187 		goto out_free;
188 
189 	req1->interface_version = VMMDEV_VERSION;
190 	req1->os_type = VMMDEV_OSTYPE_LINUX26;
191 #if __BITS_PER_LONG == 64
192 	req1->os_type |= VMMDEV_OSTYPE_X64;
193 #endif
194 
195 	req2->additions_major = VBG_VERSION_MAJOR;
196 	req2->additions_minor = VBG_VERSION_MINOR;
197 	req2->additions_build = VBG_VERSION_BUILD;
198 	req2->additions_revision = VBG_SVN_REV;
199 	req2->additions_features =
200 		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
201 	strscpy(req2->name, VBG_VERSION_STRING,
202 		sizeof(req2->name));
203 
204 	/*
205 	 * There are two protocols here:
206 	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
207 	 *      2. INFO1 and optionally INFO2. The old protocol.
208 	 *
209 	 * We try protocol 1 first (INFO2, then INFO1).  INFO2 will fail with VERR_NOT_SUPPORTED
210 	 * if not supported by the VMMDev (message ordering requirement).
211 	 */
212 	rc = vbg_req_perform(gdev, req2);
213 	if (rc >= 0) {
214 		rc = vbg_req_perform(gdev, req1);
215 	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
216 		rc = vbg_req_perform(gdev, req1);
217 		if (rc >= 0) {
218 			rc = vbg_req_perform(gdev, req2);
219 			if (rc == VERR_NOT_IMPLEMENTED)
220 				rc = VINF_SUCCESS;
221 		}
222 	}
223 	ret = vbg_status_code_to_errno(rc);
224 
225 out_free:
226 	vbg_req_free(req2, sizeof(*req2));
227 	vbg_req_free(req1, sizeof(*req1));
228 	return ret;
229 }
230 
231 /**
232  * Report the guest driver status to the host.
233  * Return: 0 or negative errno value.
234  * @gdev:		The Guest extension device.
235  * @active:		Flag whether the driver is now active or not.
236  */
237 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
238 {
239 	struct vmmdev_guest_status *req;
240 	int rc;
241 
242 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
243 			    VBG_KERNEL_REQUEST);
244 	if (!req)
245 		return -ENOMEM;
246 
247 	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
248 	if (active)
249 		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
250 	else
251 		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
252 	req->flags = 0;
253 
254 	rc = vbg_req_perform(gdev, req);
255 	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
256 		rc = VINF_SUCCESS;
257 
258 	vbg_req_free(req, sizeof(*req));
259 
260 	return vbg_status_code_to_errno(rc);
261 }
262 
263 /**
264  * Inflate the balloon by one chunk. The caller owns the balloon mutex.
265  * Return: 0 or negative errno value.
266  * @gdev:		The Guest extension device.
267  * @chunk_idx:		Index of the chunk.
268  */
269 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
270 {
271 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
272 	struct page **pages;
273 	int i, rc, ret;
274 
275 	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
276 			      sizeof(*pages),
277 			      GFP_KERNEL | __GFP_NOWARN);
278 	if (!pages)
279 		return -ENOMEM;
280 
281 	req->header.size = sizeof(*req);
282 	req->inflate = true;
283 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
284 
285 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
286 		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
287 		if (!pages[i]) {
288 			ret = -ENOMEM;
289 			goto out_error;
290 		}
291 
292 		req->phys_page[i] = page_to_phys(pages[i]);
293 	}
294 
295 	rc = vbg_req_perform(gdev, req);
296 	if (rc < 0) {
297 		vbg_err("%s error, rc: %d\n", __func__, rc);
298 		ret = vbg_status_code_to_errno(rc);
299 		goto out_error;
300 	}
301 
302 	gdev->mem_balloon.pages[chunk_idx] = pages;
303 
304 	return 0;
305 
306 out_error:
307 	while (--i >= 0)
308 		__free_page(pages[i]);
309 	kfree(pages);
310 
311 	return ret;
312 }
313 
314 /**
315  * Deflate the balloon by one chunk. The caller owns the balloon mutex.
316  * Return: 0 or negative errno value.
317  * @gdev:		The Guest extension device.
318  * @chunk_idx:		Index of the chunk.
319  */
320 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
321 {
322 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
323 	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
324 	int i, rc;
325 
326 	req->header.size = sizeof(*req);
327 	req->inflate = false;
328 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
329 
330 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
331 		req->phys_page[i] = page_to_phys(pages[i]);
332 
333 	rc = vbg_req_perform(gdev, req);
334 	if (rc < 0) {
335 		vbg_err("%s error, rc: %d\n", __func__, rc);
336 		return vbg_status_code_to_errno(rc);
337 	}
338 
339 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
340 		__free_page(pages[i]);
341 	kfree(pages);
342 	gdev->mem_balloon.pages[chunk_idx] = NULL;
343 
344 	return 0;
345 }
346 
347 /**
348  * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
349  * the host wants the balloon to be and adjust accordingly.
350  */
351 static void vbg_balloon_work(struct work_struct *work)
352 {
353 	struct vbg_dev *gdev =
354 		container_of(work, struct vbg_dev, mem_balloon.work);
355 	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
356 	u32 i, chunks;
357 	int rc, ret;
358 
359 	/*
360 	 * Setting this bit means that we request the value from the host and
361 	 * change the guest memory balloon according to the returned value.
362 	 */
363 	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
364 	rc = vbg_req_perform(gdev, req);
365 	if (rc < 0) {
366 		vbg_err("%s error, rc: %d\n", __func__, rc);
367 		return;
368 	}
369 
370 	/*
371 	 * The host always returns the same maximum number of chunks, so
372 	 * we do this once.
373 	 */
374 	if (!gdev->mem_balloon.max_chunks) {
375 		gdev->mem_balloon.pages =
376 			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
377 				     sizeof(struct page **), GFP_KERNEL);
378 		if (!gdev->mem_balloon.pages)
379 			return;
380 
381 		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
382 	}
383 
384 	chunks = req->balloon_chunks;
385 	if (chunks > gdev->mem_balloon.max_chunks) {
386 		vbg_err("%s: illegal balloon size %u (max=%u)\n",
387 			__func__, chunks, gdev->mem_balloon.max_chunks);
388 		return;
389 	}
390 
391 	if (chunks > gdev->mem_balloon.chunks) {
392 		/* inflate */
393 		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
394 			ret = vbg_balloon_inflate(gdev, i);
395 			if (ret < 0)
396 				return;
397 
398 			gdev->mem_balloon.chunks++;
399 		}
400 	} else {
401 		/* deflate */
402 		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
403 			ret = vbg_balloon_deflate(gdev, i);
404 			if (ret < 0)
405 				return;
406 
407 			gdev->mem_balloon.chunks--;
408 		}
409 	}
410 }
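/*
 * Balloon accounting sketch (assumed values: VMMDEV_MEMORY_BALLOON_CHUNK_PAGES
 * == 256 and PAGE_SIZE == 4 KiB, i.e. 1 MiB per chunk): if the host reports
 * balloon_chunks = 8 while mem_balloon.chunks is 5, the loop above calls
 * vbg_balloon_inflate() for chunk indices 5, 6 and 7, handing roughly
 * 3 * 256 * 4 KiB = 3 MiB back to the host; a later request for 6 chunks
 * deflates indices 7 and 6 again, in reverse order.
 */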
411 
412 /**
413  * Callback for heartbeat timer.
414  */
415 static void vbg_heartbeat_timer(struct timer_list *t)
416 {
417 	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
418 
419 	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
420 	mod_timer(&gdev->heartbeat_timer,
421 		  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
422 }
423 
424 /**
425  * Configure the host to check guest's heartbeat
426  * and get heartbeat interval from the host.
427  * Return: 0 or negative errno value.
428  * @gdev:		The Guest extension device.
429  * @enabled:		Set true to enable guest heartbeat checks on host.
430  */
431 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
432 {
433 	struct vmmdev_heartbeat *req;
434 	int rc;
435 
436 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
437 			    VBG_KERNEL_REQUEST);
438 	if (!req)
439 		return -ENOMEM;
440 
441 	req->enabled = enabled;
442 	req->interval_ns = 0;
443 	rc = vbg_req_perform(gdev, req);
444 	do_div(req->interval_ns, 1000000); /* ns -> ms */
445 	gdev->heartbeat_interval_ms = req->interval_ns;
446 	vbg_req_free(req, sizeof(*req));
447 
448 	return vbg_status_code_to_errno(rc);
449 }
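/*
 * Example of the interval conversion above (illustrative value): if the
 * host replies with interval_ns = 2000000000, do_div() turns that into
 * heartbeat_interval_ms = 2000, so the heartbeat timer is meant to fire
 * every two seconds.
 */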
450 
451 /**
452  * Initializes the heartbeat timer. This feature may be disabled by the host.
453  * Return: 0 or negative errno value.
454  * @gdev:		The Guest extension device.
455  */
456 static int vbg_heartbeat_init(struct vbg_dev *gdev)
457 {
458 	int ret;
459 
460 	/* Make sure that heartbeat checking is disabled if we fail. */
461 	ret = vbg_heartbeat_host_config(gdev, false);
462 	if (ret < 0)
463 		return ret;
464 
465 	ret = vbg_heartbeat_host_config(gdev, true);
466 	if (ret < 0)
467 		return ret;
468 
469 	gdev->guest_heartbeat_req = vbg_req_alloc(
470 					sizeof(*gdev->guest_heartbeat_req),
471 					VMMDEVREQ_GUEST_HEARTBEAT,
472 					VBG_KERNEL_REQUEST);
473 	if (!gdev->guest_heartbeat_req)
474 		return -ENOMEM;
475 
476 	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
477 		 __func__, gdev->heartbeat_interval_ms);
478 	mod_timer(&gdev->heartbeat_timer, 0);
479 
480 	return 0;
481 }
482 
483 /**
484  * Clean up heartbeat code: stop the heartbeat timer and disable host heartbeat checking.
485  * @gdev:		The Guest extension device.
486  */
487 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
488 {
489 	del_timer_sync(&gdev->heartbeat_timer);
490 	vbg_heartbeat_host_config(gdev, false);
491 	vbg_req_free(gdev->guest_heartbeat_req,
492 		     sizeof(*gdev->guest_heartbeat_req));
493 }
494 
495 /**
496  * Applies a change to the bit usage tracker.
497  * Return: true if the mask changed, false if not.
498  * @tracker:		The bit usage tracker.
499  * @changed:		The bits to change.
500  * @previous:		The previous value of the bits.
501  */
502 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
503 				u32 changed, u32 previous)
504 {
505 	bool global_change = false;
506 
507 	while (changed) {
508 		u32 bit = ffs(changed) - 1;
509 		u32 bitmask = BIT(bit);
510 
511 		if (bitmask & previous) {
512 			tracker->per_bit_usage[bit] -= 1;
513 			if (tracker->per_bit_usage[bit] == 0) {
514 				global_change = true;
515 				tracker->mask &= ~bitmask;
516 			}
517 		} else {
518 			tracker->per_bit_usage[bit] += 1;
519 			if (tracker->per_bit_usage[bit] == 1) {
520 				global_change = true;
521 				tracker->mask |= bitmask;
522 			}
523 		}
524 
525 		changed &= ~bitmask;
526 	}
527 
528 	return global_change;
529 }
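/*
 * Worked example (hypothetical sessions A and B toggling the same bit 0x2,
 * i.e. per_bit_usage index 1):
 *
 *   A sets   0x2: per_bit_usage[1] 0 -> 1, mask |= 0x2,    returns true
 *   B sets   0x2: per_bit_usage[1] 1 -> 2, mask unchanged, returns false
 *   A clears 0x2: per_bit_usage[1] 2 -> 1, mask unchanged, returns false
 *   B clears 0x2: per_bit_usage[1] 1 -> 0, mask &= ~0x2,   returns true
 *
 * Only the "true" cases require pushing the merged mask to the host.
 */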
530 
531 /**
532  * Init and termination worker for resetting the event filter on the host.
533  * Return: 0 or negative errno value.
534  * @gdev:		   The Guest extension device.
535  * @fixed_events:	   Fixed events (init time).
536  */
537 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
538 				       u32 fixed_events)
539 {
540 	struct vmmdev_mask *req;
541 	int rc;
542 
543 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
544 			    VBG_KERNEL_REQUEST);
545 	if (!req)
546 		return -ENOMEM;
547 
548 	req->not_mask = U32_MAX & ~fixed_events;
549 	req->or_mask = fixed_events;
550 	rc = vbg_req_perform(gdev, req);
551 	if (rc < 0)
552 		vbg_err("%s error, rc: %d\n", __func__, rc);
553 
554 	vbg_req_free(req, sizeof(*req));
555 	return vbg_status_code_to_errno(rc);
556 }
557 
558 /**
559  * Changes the event filter mask for the given session.
560  *
561  * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
562  * do session cleanup. Takes the session mutex.
563  *
564  * Return: 0 or negative errno value.
565  * @gdev:			The Guest extension device.
566  * @session:			The session.
567  * @or_mask:			The events to add.
568  * @not_mask:			The events to remove.
569  * @session_termination:	Set if we're called by the session cleanup code.
570  *				This tweaks the error handling so we perform
571  *				proper session cleanup even if the host
572  *				misbehaves.
573  */
574 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
575 					struct vbg_session *session,
576 					u32 or_mask, u32 not_mask,
577 					bool session_termination)
578 {
579 	struct vmmdev_mask *req;
580 	u32 changed, previous;
581 	int rc, ret = 0;
582 
583 	/*
584 	 * Allocate a request buffer before taking the session mutex. When
585 	 * the session is being terminated the requestor is the kernel,
586 	 * as we're cleaning up.
587 	 */
588 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
589 			    session_termination ? VBG_KERNEL_REQUEST :
590 						  session->requestor);
591 	if (!req) {
592 		if (!session_termination)
593 			return -ENOMEM;
594 		/* Ignore allocation failure, we must do session cleanup. */
595 	}
596 
597 	mutex_lock(&gdev->session_mutex);
598 
599 	/* Apply the changes to the session mask. */
600 	previous = session->event_filter;
601 	session->event_filter |= or_mask;
602 	session->event_filter &= ~not_mask;
603 
604 	/* If anything actually changed, update the global usage counters. */
605 	changed = previous ^ session->event_filter;
606 	if (!changed)
607 		goto out;
608 
609 	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
610 	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
611 
612 	if (gdev->event_filter_host == or_mask || !req)
613 		goto out;
614 
615 	gdev->event_filter_host = or_mask;
616 	req->or_mask = or_mask;
617 	req->not_mask = ~or_mask;
618 	rc = vbg_req_perform(gdev, req);
619 	if (rc < 0) {
620 		ret = vbg_status_code_to_errno(rc);
621 
622 		/* Failed, roll back (unless it's session termination time). */
623 		gdev->event_filter_host = U32_MAX;
624 		if (session_termination)
625 			goto out;
626 
627 		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
628 				    session->event_filter);
629 		session->event_filter = previous;
630 	}
631 
632 out:
633 	mutex_unlock(&gdev->session_mutex);
634 	vbg_req_free(req, sizeof(*req));
635 
636 	return ret;
637 }
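/*
 * Mask arithmetic sketch for the function above (illustrative values): with
 * a current session event_filter of 0x5, or_mask = 0x2 and not_mask = 0x4
 * yield a new filter of (0x5 | 0x2) & ~0x4 = 0x3, so changed = 0x5 ^ 0x3 =
 * 0x6; vbg_track_bit_usage() is then fed those two changed bits and the
 * host filter is only re-sent if the merged tracker mask actually moved.
 */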
638 
639 /**
640  * Init and termination worker for setting the guest capabilities to zero on the host.
641  * Return: 0 or negative errno value.
642  * @gdev:		The Guest extension device.
643  */
644 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
645 {
646 	struct vmmdev_mask *req;
647 	int rc;
648 
649 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
650 			    VBG_KERNEL_REQUEST);
651 	if (!req)
652 		return -ENOMEM;
653 
654 	req->not_mask = U32_MAX;
655 	req->or_mask = 0;
656 	rc = vbg_req_perform(gdev, req);
657 	if (rc < 0)
658 		vbg_err("%s error, rc: %d\n", __func__, rc);
659 
660 	vbg_req_free(req, sizeof(*req));
661 	return vbg_status_code_to_errno(rc);
662 }
663 
664 /**
665  * Set guest capabilities on the host.
666  * Must be called with gdev->session_mutex held.
667  * Return: 0 or negative errno value.
668  * @gdev:			The Guest extension device.
669  * @session:			The session.
670  * @session_termination:	Set if we're called by the session cleanup code.
671  */
672 static int vbg_set_host_capabilities(struct vbg_dev *gdev,
673 				     struct vbg_session *session,
674 				     bool session_termination)
675 {
676 	struct vmmdev_mask *req;
677 	u32 caps;
678 	int rc;
679 
680 	WARN_ON(!mutex_is_locked(&gdev->session_mutex));
681 
682 	caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
683 
684 	if (gdev->guest_caps_host == caps)
685 		return 0;
686 
687 	/* On termination the requestor is the kernel, as we're cleaning up. */
688 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
689 			    session_termination ? VBG_KERNEL_REQUEST :
690 						  session->requestor);
691 	if (!req) {
692 		gdev->guest_caps_host = U32_MAX;
693 		return -ENOMEM;
694 	}
695 
696 	req->or_mask = caps;
697 	req->not_mask = ~caps;
698 	rc = vbg_req_perform(gdev, req);
699 	vbg_req_free(req, sizeof(*req));
700 
701 	gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
702 
703 	return vbg_status_code_to_errno(rc);
704 }
705 
706 /**
707  * Acquire (get exclusive access) guest capabilities for a session.
708  * Takes the session mutex.
709  * Return: 0 or negative errno value.
710  * @gdev:			The Guest extension device.
711  * @session:			The session.
712  * @flags:			Flags (VBGL_IOC_AGC_FLAGS_XXX).
713  * @or_mask:			The capabilities to add.
714  * @not_mask:			The capabilities to remove.
715  * @session_termination:	Set if we're called by the session cleanup code.
716  *				This tweaks the error handling so we perform
717  *				proper session cleanup even if the host
718  *				misbehaves.
719  */
720 static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
721 					    struct vbg_session *session,
722 					    u32 or_mask, u32 not_mask,
723 					    u32 flags, bool session_termination)
724 {
725 	unsigned long irqflags;
726 	bool wakeup = false;
727 	int ret = 0;
728 
729 	mutex_lock(&gdev->session_mutex);
730 
731 	if (gdev->set_guest_caps_tracker.mask & or_mask) {
732 		vbg_err("%s error: cannot acquire caps which are currently set\n",
733 			__func__);
734 		ret = -EINVAL;
735 		goto out;
736 	}
737 
738 	/*
739 	 * Mark any caps in the or_mask as now being in acquire-mode. Note
740 	 * once caps are in acquire_mode they always stay in this mode.
741 	 * This impacts event handling, so we take the event-lock.
742 	 */
743 	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
744 	gdev->acquire_mode_guest_caps |= or_mask;
745 	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
746 
747 	/* If we only have to switch the caps to acquire mode, we're done. */
748 	if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
749 		goto out;
750 
751 	not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
752 	not_mask &= session->acquired_guest_caps;
753 	or_mask &= ~session->acquired_guest_caps;
754 
755 	if (or_mask == 0 && not_mask == 0)
756 		goto out;
757 
758 	if (gdev->acquired_guest_caps & or_mask) {
759 		ret = -EBUSY;
760 		goto out;
761 	}
762 
763 	gdev->acquired_guest_caps |= or_mask;
764 	gdev->acquired_guest_caps &= ~not_mask;
765 	/* session->acquired_guest_caps impacts event handling, take the lock */
766 	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
767 	session->acquired_guest_caps |= or_mask;
768 	session->acquired_guest_caps &= ~not_mask;
769 	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
770 
771 	ret = vbg_set_host_capabilities(gdev, session, session_termination);
772 	/* Roll back on failure, unless it's session termination time. */
773 	if (ret < 0 && !session_termination) {
774 		gdev->acquired_guest_caps &= ~or_mask;
775 		gdev->acquired_guest_caps |= not_mask;
776 		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
777 		session->acquired_guest_caps &= ~or_mask;
778 		session->acquired_guest_caps |= not_mask;
779 		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
780 	}
781 
782 	/*
783 	 * If we added a capability, check if that means some other thread in
784 	 * our session should be unblocked because there are events pending
785 	 * (the result of vbg_get_allowed_event_mask_for_session() may change).
786 	 *
787 	 * HACK ALERT! When the seamless support capability is added we generate
788 	 *	a seamless change event so that the ring-3 client can sync with
789 	 *	the seamless state.
790 	 */
791 	if (ret == 0 && or_mask != 0) {
792 		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
793 
794 		if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
795 			gdev->pending_events |=
796 				VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
797 
798 		if (gdev->pending_events)
799 			wakeup = true;
800 
801 		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
802 
803 		if (wakeup)
804 			wake_up(&gdev->event_wq);
805 	}
806 
807 out:
808 	mutex_unlock(&gdev->session_mutex);
809 
810 	return ret;
811 }
812 
813 /**
814  * Sets the guest capabilities for a session. Takes the session mutex.
815  * Return: 0 or negative errno value.
816  * @gdev:			The Guest extension device.
817  * @session:			The session.
818  * @or_mask:			The capabilities to add.
819  * @not_mask:			The capabilities to remove.
820  * @session_termination:	Set if we're called by the session cleanup code.
821  *				This tweaks the error handling so we perform
822  *				proper session cleanup even if the host
823  *				misbehaves.
824  */
825 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
826 					struct vbg_session *session,
827 					u32 or_mask, u32 not_mask,
828 					bool session_termination)
829 {
830 	u32 changed, previous;
831 	int ret = 0;
832 
833 	mutex_lock(&gdev->session_mutex);
834 
835 	if (gdev->acquire_mode_guest_caps & or_mask) {
836 		vbg_err("%s error: cannot set caps which are in acquire_mode\n",
837 			__func__);
838 		ret = -EBUSY;
839 		goto out;
840 	}
841 
842 	/* Apply the changes to the session mask. */
843 	previous = session->set_guest_caps;
844 	session->set_guest_caps |= or_mask;
845 	session->set_guest_caps &= ~not_mask;
846 
847 	/* If anything actually changed, update the global usage counters. */
848 	changed = previous ^ session->set_guest_caps;
849 	if (!changed)
850 		goto out;
851 
852 	vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
853 
854 	ret = vbg_set_host_capabilities(gdev, session, session_termination);
855 	/* Roll back on failure, unless it's session termination time. */
856 	if (ret < 0 && !session_termination) {
857 		vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
858 				    session->set_guest_caps);
859 		session->set_guest_caps = previous;
860 	}
861 
862 out:
863 	mutex_unlock(&gdev->session_mutex);
864 
865 	return ret;
866 }
867 
868 /**
869  * vbg_query_host_version gets the host feature mask and version information.
870  * Return: 0 or negative errno value.
871  * @gdev:		The Guest extension device.
872  */
873 static int vbg_query_host_version(struct vbg_dev *gdev)
874 {
875 	struct vmmdev_host_version *req;
876 	int rc, ret;
877 
878 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
879 			    VBG_KERNEL_REQUEST);
880 	if (!req)
881 		return -ENOMEM;
882 
883 	rc = vbg_req_perform(gdev, req);
884 	ret = vbg_status_code_to_errno(rc);
885 	if (ret) {
886 		vbg_err("%s error: %d\n", __func__, rc);
887 		goto out;
888 	}
889 
890 	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
891 		 req->major, req->minor, req->build, req->revision);
892 	gdev->host_features = req->features;
893 
894 	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
895 		 gdev->host_features);
896 
897 	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
898 		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
899 		ret = -ENODEV;
900 	}
901 
902 out:
903 	vbg_req_free(req, sizeof(*req));
904 	return ret;
905 }
906 
907 /**
908  * Initializes the VBoxGuest device extension when the
909  * device driver is loaded.
910  *
911  * The native code locates the VMMDev on the PCI bus and retrieves
912  * the MMIO and I/O port ranges; this function takes care of
913  * mapping the MMIO memory (if present). Upon successful return
914  * the native code should set up the interrupt handler.
915  *
916  * Return: 0 or negative errno value.
917  *
918  * @gdev:		The Guest extension device.
919  * @fixed_events:	Events that will be enabled upon init and no client
920  *			will ever be allowed to mask.
921  */
922 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
923 {
924 	int ret = -ENOMEM;
925 
926 	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
927 	gdev->event_filter_host = U32_MAX;	/* forces a report */
928 	gdev->guest_caps_host = U32_MAX;	/* forces a report */
929 
930 	init_waitqueue_head(&gdev->event_wq);
931 	init_waitqueue_head(&gdev->hgcm_wq);
932 	spin_lock_init(&gdev->event_spinlock);
933 	mutex_init(&gdev->session_mutex);
934 	mutex_init(&gdev->cancel_req_mutex);
935 	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
936 	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
937 
938 	gdev->mem_balloon.get_req =
939 		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
940 			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
941 			      VBG_KERNEL_REQUEST);
942 	gdev->mem_balloon.change_req =
943 		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
944 			      VMMDEVREQ_CHANGE_MEMBALLOON,
945 			      VBG_KERNEL_REQUEST);
946 	gdev->cancel_req =
947 		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
948 			      VMMDEVREQ_HGCM_CANCEL2,
949 			      VBG_KERNEL_REQUEST);
950 	gdev->ack_events_req =
951 		vbg_req_alloc(sizeof(*gdev->ack_events_req),
952 			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
953 			      VBG_KERNEL_REQUEST);
954 	gdev->mouse_status_req =
955 		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
956 			      VMMDEVREQ_GET_MOUSE_STATUS,
957 			      VBG_KERNEL_REQUEST);
958 
959 	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
960 	    !gdev->cancel_req || !gdev->ack_events_req ||
961 	    !gdev->mouse_status_req)
962 		goto err_free_reqs;
963 
964 	ret = vbg_query_host_version(gdev);
965 	if (ret)
966 		goto err_free_reqs;
967 
968 	ret = vbg_report_guest_info(gdev);
969 	if (ret) {
970 		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
971 		goto err_free_reqs;
972 	}
973 
974 	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
975 	if (ret) {
976 		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
977 			ret);
978 		goto err_free_reqs;
979 	}
980 
981 	ret = vbg_reset_host_capabilities(gdev);
982 	if (ret) {
983 		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
984 			ret);
985 		goto err_free_reqs;
986 	}
987 
988 	ret = vbg_core_set_mouse_status(gdev, 0);
989 	if (ret) {
990 		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
991 		goto err_free_reqs;
992 	}
993 
994 	/* These may fail without requiring the driver init to fail. */
995 	vbg_guest_mappings_init(gdev);
996 	vbg_heartbeat_init(gdev);
997 
998 	/* All Done! */
999 	ret = vbg_report_driver_status(gdev, true);
1000 	if (ret < 0)
1001 		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
1002 
1003 	return 0;
1004 
1005 err_free_reqs:
1006 	vbg_req_free(gdev->mouse_status_req,
1007 		     sizeof(*gdev->mouse_status_req));
1008 	vbg_req_free(gdev->ack_events_req,
1009 		     sizeof(*gdev->ack_events_req));
1010 	vbg_req_free(gdev->cancel_req,
1011 		     sizeof(*gdev->cancel_req));
1012 	vbg_req_free(gdev->mem_balloon.change_req,
1013 		     sizeof(*gdev->mem_balloon.change_req));
1014 	vbg_req_free(gdev->mem_balloon.get_req,
1015 		     sizeof(*gdev->mem_balloon.get_req));
1016 	return ret;
1017 }
1018 
1019 /**
1020  * Call this on exit to clean-up vboxguest-core managed resources.
1021  *
1022  * The native code should call this before the driver is unloaded,
1023  * but not on shutdown.
1024  * @gdev:		The Guest extension device.
1025  */
1026 void vbg_core_exit(struct vbg_dev *gdev)
1027 {
1028 	vbg_heartbeat_exit(gdev);
1029 	vbg_guest_mappings_exit(gdev);
1030 
1031 	/* Clear the host flags (mouse status etc). */
1032 	vbg_reset_host_event_filter(gdev, 0);
1033 	vbg_reset_host_capabilities(gdev);
1034 	vbg_core_set_mouse_status(gdev, 0);
1035 
1036 	vbg_req_free(gdev->mouse_status_req,
1037 		     sizeof(*gdev->mouse_status_req));
1038 	vbg_req_free(gdev->ack_events_req,
1039 		     sizeof(*gdev->ack_events_req));
1040 	vbg_req_free(gdev->cancel_req,
1041 		     sizeof(*gdev->cancel_req));
1042 	vbg_req_free(gdev->mem_balloon.change_req,
1043 		     sizeof(*gdev->mem_balloon.change_req));
1044 	vbg_req_free(gdev->mem_balloon.get_req,
1045 		     sizeof(*gdev->mem_balloon.get_req));
1046 }
1047 
1048 /**
1049  * Creates a VBoxGuest user session.
1050  *
1051  * vboxguest_linux.c calls this when userspace opens the char-device.
1052  * Return: A pointer to the new session or an ERR_PTR on error.
1053  * @gdev:		The Guest extension device.
1054  * @requestor:		VMMDEV_REQUESTOR_* flags
1055  */
1056 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
1057 {
1058 	struct vbg_session *session;
1059 
1060 	session = kzalloc(sizeof(*session), GFP_KERNEL);
1061 	if (!session)
1062 		return ERR_PTR(-ENOMEM);
1063 
1064 	session->gdev = gdev;
1065 	session->requestor = requestor;
1066 
1067 	return session;
1068 }
1069 
1070 /**
1071  * Closes a VBoxGuest session.
1072  * @session:		The session to close (and free).
1073  */
1074 void vbg_core_close_session(struct vbg_session *session)
1075 {
1076 	struct vbg_dev *gdev = session->gdev;
1077 	int i, rc;
1078 
1079 	vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
1080 	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
1081 	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
1082 
1083 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1084 		if (!session->hgcm_client_ids[i])
1085 			continue;
1086 
1087 		/* requestor is kernel here, as we're cleaning up. */
1088 		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
1089 				    session->hgcm_client_ids[i], &rc);
1090 	}
1091 
1092 	kfree(session);
1093 }
1094 
1095 static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
1096 			 size_t out_size)
1097 {
1098 	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
1099 	    hdr->size_out != (sizeof(*hdr) + out_size))
1100 		return -EINVAL;
1101 
1102 	return 0;
1103 }
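/*
 * Minimal userspace-side sketch of the size convention enforced above
 * (hedged: constant and field names assumed from the vboxguest uapi
 * header), here for a disconnect request that has an input payload but
 * no output payload:
 *
 *   struct vbg_ioctl_hgcm_disconnect req = {};
 *
 *   req.hdr.size_in    = sizeof(req.hdr) + sizeof(req.u.in);
 *   req.hdr.size_out   = sizeof(req.hdr);   // no out part
 *   req.hdr.version    = VBG_IOCTL_HDR_VERSION;
 *   req.hdr.type       = VBG_IOCTL_HDR_TYPE_DEFAULT;
 *   req.u.in.client_id = client_id;
 *
 * Anything that does not match these exact sizes is rejected with -EINVAL.
 */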
1104 
1105 static int vbg_ioctl_driver_version_info(
1106 	struct vbg_ioctl_driver_version_info *info)
1107 {
1108 	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
1109 	u16 min_maj_version, req_maj_version;
1110 
1111 	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
1112 		return -EINVAL;
1113 
1114 	req_maj_version = info->u.in.req_version >> 16;
1115 	min_maj_version = info->u.in.min_version >> 16;
1116 
1117 	if (info->u.in.min_version > info->u.in.req_version ||
1118 	    min_maj_version != req_maj_version)
1119 		return -EINVAL;
1120 
1121 	if (info->u.in.min_version <= VBG_IOC_VERSION &&
1122 	    min_maj_version == vbg_maj_version) {
1123 		info->u.out.session_version = VBG_IOC_VERSION;
1124 	} else {
1125 		info->u.out.session_version = U32_MAX;
1126 		info->hdr.rc = VERR_VERSION_MISMATCH;
1127 	}
1128 
1129 	info->u.out.driver_version  = VBG_IOC_VERSION;
1130 	info->u.out.driver_revision = 0;
1131 	info->u.out.reserved1      = 0;
1132 	info->u.out.reserved2      = 0;
1133 
1134 	return 0;
1135 }
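/*
 * Version handshake example (assuming, purely for illustration, that
 * VBG_IOC_VERSION is 0x00010000, i.e. major 1, minor 0): a client asking
 * for req_version 0x00010003 with min_version 0x00010000 shares major 1
 * with the driver and gets session_version = 0x00010000 back; a client
 * whose min_version carries major 2 gets session_version = U32_MAX and
 * hdr.rc = VERR_VERSION_MISMATCH instead.
 */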
1136 
1137 /* Must be called with the event_lock held */
1138 static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
1139 						  struct vbg_session *session)
1140 {
1141 	u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
1142 	u32 session_acquired_caps = session->acquired_guest_caps;
1143 	u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
1144 
1145 	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
1146 	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
1147 		allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
1148 
1149 	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
1150 	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
1151 		allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
1152 
1153 	return allowed_events;
1154 }
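/*
 * Worked example (hypothetical state): with acquire_mode_guest_caps =
 * VMMDEV_GUEST_SUPPORTS_SEAMLESS and a session whose acquired_guest_caps
 * is still 0, the function returns
 *   VMMDEV_EVENT_VALID_EVENT_MASK & ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST
 * so only the session that actually acquired the seamless capability can
 * consume seamless-change events in the wait/consume helpers below.
 */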
1155 
1156 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
1157 				struct vbg_session *session,
1158 				u32 event_mask)
1159 {
1160 	unsigned long flags;
1161 	bool wakeup;
1162 	u32 events;
1163 
1164 	spin_lock_irqsave(&gdev->event_spinlock, flags);
1165 
1166 	events = gdev->pending_events & event_mask;
1167 	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
1168 	wakeup = events || session->cancel_waiters;
1169 
1170 	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1171 
1172 	return wakeup;
1173 }
1174 
1175 /* Must be called with the event_lock held */
1176 static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
1177 				     struct vbg_session *session,
1178 				     u32 event_mask)
1179 {
1180 	u32 events = gdev->pending_events & event_mask;
1181 
1182 	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
1183 	gdev->pending_events &= ~events;
1184 	return events;
1185 }
1186 
1187 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1188 				     struct vbg_session *session,
1189 				     struct vbg_ioctl_wait_for_events *wait)
1190 {
1191 	u32 timeout_ms = wait->u.in.timeout_ms;
1192 	u32 event_mask = wait->u.in.events;
1193 	unsigned long flags;
1194 	long timeout;
1195 	int ret = 0;
1196 
1197 	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1198 		return -EINVAL;
1199 
1200 	if (timeout_ms == U32_MAX)
1201 		timeout = MAX_SCHEDULE_TIMEOUT;
1202 	else
1203 		timeout = msecs_to_jiffies(timeout_ms);
1204 
1205 	wait->u.out.events = 0;
1206 	do {
1207 		timeout = wait_event_interruptible_timeout(
1208 				gdev->event_wq,
1209 				vbg_wait_event_cond(gdev, session, event_mask),
1210 				timeout);
1211 
1212 		spin_lock_irqsave(&gdev->event_spinlock, flags);
1213 
1214 		if (timeout < 0 || session->cancel_waiters) {
1215 			ret = -EINTR;
1216 		} else if (timeout == 0) {
1217 			ret = -ETIMEDOUT;
1218 		} else {
1219 			wait->u.out.events =
1220 			   vbg_consume_events_locked(gdev, session, event_mask);
1221 		}
1222 
1223 		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1224 
1225 		/*
1226 		 * Someone else may have consumed the event(s) first, in
1227 		 * which case we go back to waiting.
1228 		 */
1229 	} while (ret == 0 && wait->u.out.events == 0);
1230 
1231 	return ret;
1232 }
1233 
1234 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1235 					       struct vbg_session *session,
1236 					       struct vbg_ioctl_hdr *hdr)
1237 {
1238 	unsigned long flags;
1239 
1240 	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1241 		return -EINVAL;
1242 
1243 	spin_lock_irqsave(&gdev->event_spinlock, flags);
1244 	session->cancel_waiters = true;
1245 	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1246 
1247 	wake_up(&gdev->event_wq);
1248 
1249 	return 0;
1250 }
1251 
1252 /**
1253  * Checks if the VMM request is allowed in the context of the given session.
1254  * Return: 0 or negative errno value.
1255  * @gdev:		The Guest extension device.
1256  * @session:		The calling session.
1257  * @req:		The request.
1258  */
1259 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1260 			   const struct vmmdev_request_header *req)
1261 {
1262 	const struct vmmdev_guest_status *guest_status;
1263 	bool trusted_apps_only;
1264 
1265 	switch (req->request_type) {
1266 	/* Trusted user apps only. */
1267 	case VMMDEVREQ_QUERY_CREDENTIALS:
1268 	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1269 	case VMMDEVREQ_REGISTER_SHARED_MODULE:
1270 	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1271 	case VMMDEVREQ_WRITE_COREDUMP:
1272 	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1273 	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1274 	case VMMDEVREQ_CHECK_SHARED_MODULES:
1275 	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1276 	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1277 	case VMMDEVREQ_REPORT_GUEST_STATS:
1278 	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1279 	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1280 		trusted_apps_only = true;
1281 		break;
1282 
1283 	/* Anyone. */
1284 	case VMMDEVREQ_GET_MOUSE_STATUS:
1285 	case VMMDEVREQ_SET_MOUSE_STATUS:
1286 	case VMMDEVREQ_SET_POINTER_SHAPE:
1287 	case VMMDEVREQ_GET_HOST_VERSION:
1288 	case VMMDEVREQ_IDLE:
1289 	case VMMDEVREQ_GET_HOST_TIME:
1290 	case VMMDEVREQ_SET_POWER_STATUS:
1291 	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1292 	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1293 	case VMMDEVREQ_REPORT_GUEST_STATUS:
1294 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1295 	case VMMDEVREQ_VIDEMODE_SUPPORTED:
1296 	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1297 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1298 	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1299 	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1300 	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1301 	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1302 	case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
1303 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1304 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
1305 	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1306 	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1307 	case VMMDEVREQ_LOG_STRING:
1308 	case VMMDEVREQ_GET_SESSION_ID:
1309 		trusted_apps_only = false;
1310 		break;
1311 
1312 	/* Depends on the request parameters... */
1313 	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1314 		guest_status = (const struct vmmdev_guest_status *)req;
1315 		switch (guest_status->facility) {
1316 		case VBOXGUEST_FACILITY_TYPE_ALL:
1317 		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1318 			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1319 				guest_status->facility);
1320 			return -EPERM;
1321 		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1322 			trusted_apps_only = true;
1323 			break;
1324 		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1325 		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1326 		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1327 		default:
1328 			trusted_apps_only = false;
1329 			break;
1330 		}
1331 		break;
1332 
1333 	/* Anything else is not allowed. */
1334 	default:
1335 		vbg_err("Denying userspace vmm call type %#08x\n",
1336 			req->request_type);
1337 		return -EPERM;
1338 	}
1339 
1340 	if (trusted_apps_only &&
1341 	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
1342 		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1343 			req->request_type);
1344 		return -EPERM;
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1351 				struct vbg_session *session, void *data)
1352 {
1353 	struct vbg_ioctl_hdr *hdr = data;
1354 	int ret;
1355 
1356 	if (hdr->size_in != hdr->size_out)
1357 		return -EINVAL;
1358 
1359 	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1360 		return -E2BIG;
1361 
1362 	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1363 		return -EINVAL;
1364 
1365 	ret = vbg_req_allowed(gdev, session, data);
1366 	if (ret < 0)
1367 		return ret;
1368 
1369 	vbg_req_perform(gdev, data);
1370 	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1371 
1372 	return 0;
1373 }
1374 
1375 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1376 				  struct vbg_session *session,
1377 				  struct vbg_ioctl_hgcm_connect *conn)
1378 {
1379 	u32 client_id;
1380 	int i, ret;
1381 
1382 	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1383 		return -EINVAL;
1384 
1385 	/* Find a free place in the session's client ID array and claim it. */
1386 	mutex_lock(&gdev->session_mutex);
1387 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1388 		if (!session->hgcm_client_ids[i]) {
1389 			session->hgcm_client_ids[i] = U32_MAX;
1390 			break;
1391 		}
1392 	}
1393 	mutex_unlock(&gdev->session_mutex);
1394 
1395 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1396 		return -EMFILE;
1397 
1398 	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
1399 			       &client_id, &conn->hdr.rc);
1400 
1401 	mutex_lock(&gdev->session_mutex);
1402 	if (ret == 0 && conn->hdr.rc >= 0) {
1403 		conn->u.out.client_id = client_id;
1404 		session->hgcm_client_ids[i] = client_id;
1405 	} else {
1406 		conn->u.out.client_id = 0;
1407 		session->hgcm_client_ids[i] = 0;
1408 	}
1409 	mutex_unlock(&gdev->session_mutex);
1410 
1411 	return ret;
1412 }
1413 
1414 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1415 				     struct vbg_session *session,
1416 				     struct vbg_ioctl_hgcm_disconnect *disconn)
1417 {
1418 	u32 client_id;
1419 	int i, ret;
1420 
1421 	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1422 		return -EINVAL;
1423 
1424 	client_id = disconn->u.in.client_id;
1425 	if (client_id == 0 || client_id == U32_MAX)
1426 		return -EINVAL;
1427 
1428 	mutex_lock(&gdev->session_mutex);
1429 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1430 		if (session->hgcm_client_ids[i] == client_id) {
1431 			session->hgcm_client_ids[i] = U32_MAX;
1432 			break;
1433 		}
1434 	}
1435 	mutex_unlock(&gdev->session_mutex);
1436 
1437 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1438 		return -EINVAL;
1439 
1440 	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
1441 				  &disconn->hdr.rc);
1442 
1443 	mutex_lock(&gdev->session_mutex);
1444 	if (ret == 0 && disconn->hdr.rc >= 0)
1445 		session->hgcm_client_ids[i] = 0;
1446 	else
1447 		session->hgcm_client_ids[i] = client_id;
1448 	mutex_unlock(&gdev->session_mutex);
1449 
1450 	return ret;
1451 }
1452 
1453 static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1454 {
1455 	switch (type) {
1456 	case VMMDEV_HGCM_PARM_TYPE_32BIT:
1457 	case VMMDEV_HGCM_PARM_TYPE_64BIT:
1458 	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1459 	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1460 	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
1461 		return true;
1462 	default:
1463 		return false;
1464 	}
1465 }
1466 
1467 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1468 			       struct vbg_session *session, bool f32bit,
1469 			       struct vbg_ioctl_hgcm_call *call)
1470 {
1471 	size_t actual_size;
1472 	u32 client_id;
1473 	int i, ret;
1474 
1475 	if (call->hdr.size_in < sizeof(*call))
1476 		return -EINVAL;
1477 
1478 	if (call->hdr.size_in != call->hdr.size_out)
1479 		return -EINVAL;
1480 
1481 	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1482 		return -E2BIG;
1483 
1484 	client_id = call->client_id;
1485 	if (client_id == 0 || client_id == U32_MAX)
1486 		return -EINVAL;
1487 
1488 	actual_size = sizeof(*call);
1489 	if (f32bit)
1490 		actual_size += call->parm_count *
1491 			       sizeof(struct vmmdev_hgcm_function_parameter32);
1492 	else
1493 		actual_size += call->parm_count *
1494 			       sizeof(struct vmmdev_hgcm_function_parameter);
1495 	if (call->hdr.size_in < actual_size) {
1496 		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1497 			  call->hdr.size_in, actual_size);
1498 		return -EINVAL;
1499 	}
1500 	call->hdr.size_out = actual_size;
1501 
1502 	/* Validate parameter types */
1503 	if (f32bit) {
1504 		struct vmmdev_hgcm_function_parameter32 *parm =
1505 			VBG_IOCTL_HGCM_CALL_PARMS32(call);
1506 
1507 		for (i = 0; i < call->parm_count; i++)
1508 			if (!vbg_param_valid(parm[i].type))
1509 				return -EINVAL;
1510 	} else {
1511 		struct vmmdev_hgcm_function_parameter *parm =
1512 			VBG_IOCTL_HGCM_CALL_PARMS(call);
1513 
1514 		for (i = 0; i < call->parm_count; i++)
1515 			if (!vbg_param_valid(parm[i].type))
1516 				return -EINVAL;
1517 	}
1518 
1519 	/*
1520 	 * Validate the client id.
1521 	 */
1522 	mutex_lock(&gdev->session_mutex);
1523 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1524 		if (session->hgcm_client_ids[i] == client_id)
1525 			break;
1526 	mutex_unlock(&gdev->session_mutex);
1527 	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1528 		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1529 			  client_id);
1530 		return -EINVAL;
1531 	}
1532 
1533 	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1534 		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
1535 				      call->function, call->timeout_ms,
1536 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
1537 				      call->parm_count, &call->hdr.rc);
1538 	else
1539 		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
1540 				    call->function, call->timeout_ms,
1541 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
1542 				    call->parm_count, &call->hdr.rc);
1543 
1544 	if (ret == -E2BIG) {
1545 		/* E2BIG needs to be reported through the hdr.rc field. */
1546 		call->hdr.rc = VERR_OUT_OF_RANGE;
1547 		ret = 0;
1548 	}
1549 
1550 	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1551 		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1552 
1553 	return ret;
1554 }
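/*
 * Size math sketch for the validation above (illustrative numbers): a
 * 64-bit HGCM call with parm_count == 3 must arrive with hdr.size_in of
 * at least
 *   sizeof(struct vbg_ioctl_hgcm_call) +
 *   3 * sizeof(struct vmmdev_hgcm_function_parameter)
 * bytes; hdr.size_out is then trimmed to exactly that value before the
 * parameters are handed to vbg_hgcm_call() or vbg_hgcm_call32().
 */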
1555 
1556 static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1557 {
1558 	if (log->hdr.size_out != sizeof(log->hdr))
1559 		return -EINVAL;
1560 
1561 	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1562 		 log->u.in.msg);
1563 
1564 	return 0;
1565 }
1566 
1567 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1568 					struct vbg_session *session,
1569 					struct vbg_ioctl_change_filter *filter)
1570 {
1571 	u32 or_mask, not_mask;
1572 
1573 	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1574 		return -EINVAL;
1575 
1576 	or_mask = filter->u.in.or_mask;
1577 	not_mask = filter->u.in.not_mask;
1578 
1579 	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1580 		return -EINVAL;
1581 
1582 	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1583 					    false);
1584 }
1585 
1586 static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
1587 	     struct vbg_session *session,
1588 	     struct vbg_ioctl_acquire_guest_caps *caps)
1589 {
1590 	u32 flags, or_mask, not_mask;
1591 
1592 	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
1593 		return -EINVAL;
1594 
1595 	flags = caps->u.in.flags;
1596 	or_mask = caps->u.in.or_mask;
1597 	not_mask = caps->u.in.not_mask;
1598 
1599 	if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
1600 		return -EINVAL;
1601 
1602 	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1603 		return -EINVAL;
1604 
1605 	return vbg_acquire_session_capabilities(gdev, session, or_mask,
1606 						not_mask, flags, false);
1607 }
1608 
1609 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1610 	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1611 {
1612 	u32 or_mask, not_mask;
1613 	int ret;
1614 
1615 	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1616 		return -EINVAL;
1617 
1618 	or_mask = caps->u.in.or_mask;
1619 	not_mask = caps->u.in.not_mask;
1620 
1621 	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1622 		return -EINVAL;
1623 
1624 	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1625 					   false);
1626 	if (ret)
1627 		return ret;
1628 
1629 	caps->u.out.session_caps = session->set_guest_caps;
1630 	caps->u.out.global_caps = gdev->guest_caps_host;
1631 
1632 	return 0;
1633 }
1634 
1635 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1636 				   struct vbg_ioctl_check_balloon *balloon_info)
1637 {
1638 	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1639 		return -EINVAL;
1640 
1641 	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1642 	/*
1643 	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1644 	 * events entirely in the kernel, see vbg_core_isr().
1645 	 */
1646 	balloon_info->u.out.handle_in_r3 = false;
1647 
1648 	return 0;
1649 }
1650 
1651 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1652 				     struct vbg_session *session,
1653 				     struct vbg_ioctl_write_coredump *dump)
1654 {
1655 	struct vmmdev_write_core_dump *req;
1656 
1657 	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1658 		return -EINVAL;
1659 
1660 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1661 			    session->requestor);
1662 	if (!req)
1663 		return -ENOMEM;
1664 
1665 	req->flags = dump->u.in.flags;
1666 	dump->hdr.rc = vbg_req_perform(gdev, req);
1667 
1668 	vbg_req_free(req, sizeof(*req));
1669 	return 0;
1670 }
1671 
1672 /**
1673  * Common IOCtl for user to kernel communication.
1674  * Return: 0 or negative errno value.
1675  * @session:	The client session.
1676  * @req:	The requested function.
1677  * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1678  */
1679 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1680 {
1681 	unsigned int req_no_size = req & ~IOCSIZE_MASK;
1682 	struct vbg_dev *gdev = session->gdev;
1683 	struct vbg_ioctl_hdr *hdr = data;
1684 	bool f32bit = false;
1685 
1686 	hdr->rc = VINF_SUCCESS;
1687 	if (!hdr->size_out)
1688 		hdr->size_out = hdr->size_in;
1689 
1690 	/*
1691 	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
1692 	 * already checked by vbg_misc_device_ioctl().
1693 	 */
1694 
1695 	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1696 	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1697 	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
1698 	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
1699 		return vbg_ioctl_vmmrequest(gdev, session, data);
1700 
1701 	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1702 		return -EINVAL;
1703 
1704 	/* Fixed size requests. */
1705 	switch (req) {
1706 	case VBG_IOCTL_DRIVER_VERSION_INFO:
1707 		return vbg_ioctl_driver_version_info(data);
1708 	case VBG_IOCTL_HGCM_CONNECT:
1709 		return vbg_ioctl_hgcm_connect(gdev, session, data);
1710 	case VBG_IOCTL_HGCM_DISCONNECT:
1711 		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1712 	case VBG_IOCTL_WAIT_FOR_EVENTS:
1713 		return vbg_ioctl_wait_for_events(gdev, session, data);
1714 	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1715 		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1716 	case VBG_IOCTL_CHANGE_FILTER_MASK:
1717 		return vbg_ioctl_change_filter_mask(gdev, session, data);
1718 	case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
1719 		return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
1720 	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1721 		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1722 	case VBG_IOCTL_CHECK_BALLOON:
1723 		return vbg_ioctl_check_balloon(gdev, data);
1724 	case VBG_IOCTL_WRITE_CORE_DUMP:
1725 		return vbg_ioctl_write_core_dump(gdev, session, data);
1726 	}
1727 
1728 	/* Variable sized requests. */
1729 	switch (req_no_size) {
1730 #ifdef CONFIG_COMPAT
1731 	case VBG_IOCTL_HGCM_CALL_32(0):
1732 		f32bit = true;
1733 		fallthrough;
1734 #endif
1735 	case VBG_IOCTL_HGCM_CALL(0):
1736 		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1737 	case VBG_IOCTL_LOG(0):
1738 	case VBG_IOCTL_LOG_ALT(0):
1739 		return vbg_ioctl_log(data);
1740 	}
1741 
1742 	vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
1743 	return -ENOTTY;
1744 }
1745 
1746 /**
1747  * Report guest supported mouse-features to the host.
1748  *
1749  * Return: 0 or negative errno value.
1750  * @gdev:		The Guest extension device.
1751  * @features:		The set of features to report to the host.
1752  */
1753 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1754 {
1755 	struct vmmdev_mouse_status *req;
1756 	int rc;
1757 
1758 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1759 			    VBG_KERNEL_REQUEST);
1760 	if (!req)
1761 		return -ENOMEM;
1762 
1763 	req->mouse_features = features;
1764 	req->pointer_pos_x = 0;
1765 	req->pointer_pos_y = 0;
1766 
1767 	rc = vbg_req_perform(gdev, req);
1768 	if (rc < 0)
1769 		vbg_err("%s error, rc: %d\n", __func__, rc);
1770 
1771 	vbg_req_free(req, sizeof(*req));
1772 	return vbg_status_code_to_errno(rc);
1773 }
1774 
1775 /** Core interrupt service routine. */
1776 irqreturn_t vbg_core_isr(int irq, void *dev_id)
1777 {
1778 	struct vbg_dev *gdev = dev_id;
1779 	struct vmmdev_events *req = gdev->ack_events_req;
1780 	bool mouse_position_changed = false;
1781 	unsigned long flags;
1782 	u32 events = 0;
1783 	int rc;
1784 
1785 	if (!gdev->mmio->V.V1_04.have_events)
1786 		return IRQ_NONE;
1787 
1788 	/* Get and acknowledge events. */
1789 	req->header.rc = VERR_INTERNAL_ERROR;
1790 	req->events = 0;
1791 	rc = vbg_req_perform(gdev, req);
1792 	if (rc < 0) {
1793 		vbg_err("Error performing events req, rc: %d\n", rc);
1794 		return IRQ_NONE;
1795 	}
1796 
1797 	events = req->events;
1798 
1799 	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1800 		mouse_position_changed = true;
1801 		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1802 	}
1803 
1804 	if (events & VMMDEV_EVENT_HGCM) {
1805 		wake_up(&gdev->hgcm_wq);
1806 		events &= ~VMMDEV_EVENT_HGCM;
1807 	}
1808 
1809 	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1810 		schedule_work(&gdev->mem_balloon.work);
1811 		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1812 	}
1813 
1814 	if (events) {
1815 		spin_lock_irqsave(&gdev->event_spinlock, flags);
1816 		gdev->pending_events |= events;
1817 		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1818 
1819 		wake_up(&gdev->event_wq);
1820 	}
1821 
1822 	if (mouse_position_changed)
1823 		vbg_linux_mouse_event(gdev);
1824 
1825 	return IRQ_HANDLED;
1826 }
1827