/*
 * drivers/staging/android/vsoc.c
 *
 * Android Virtual System on a Chip (VSoC) driver
 *
 * Copyright (C) 2017 Google, Inc.
 *
 * Author: ghartman@google.com
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
 *         Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
 *
 * Based on cirrusfb.c and 8139cp.c:
 *   Copyright 1999-2001 Jeff Garzik
 *   Copyright 2001-2004 Jeff Garzik
 */

#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/futex.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include "uapi/vsoc_shm.h"

#define VSOC_DEV_NAME "vsoc"

/*
 * Description of the ivshmem-doorbell PCI device used by QEmu. These
 * constants follow docs/specs/ivshmem-spec.txt, which can be found in
 * the QEmu repository. This was last reconciled with the version that
 * came out with QEmu 2.8.
 */

/*
 * These constants are the KVM Inter-VM shared memory device
 * register offsets.
 */
enum {
	INTR_MASK = 0x00,	/* Interrupt Mask */
	INTR_STATUS = 0x04,	/* Interrupt Status */
	IV_POSITION = 0x08,	/* VM ID */
	DOORBELL = 0x0c,	/* Doorbell */
};
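
/*
 * Illustrative sketch, not an additional API: with the BAR-0 registers mapped
 * at vsoc_dev.regs, ringing the doorbell for a region amounts to writing that
 * region's index to the DOORBELL register, which is what the ioctl handler
 * later in this file does with writel(). The helper name below is
 * hypothetical.
 *
 *	static void example_ring_doorbell(u32 region_index)
 *	{
 *		writel(region_index, vsoc_dev.regs + DOORBELL);
 *	}
 */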

static const int REGISTER_BAR;  /* Equal to 0 */
static const int MAX_REGISTER_BAR_LEN = 0x100;
/*
 * The MSI-x BAR is not used directly.
 *
 * static const int MSI_X_BAR = 1;
 */
static const int SHARED_MEMORY_BAR = 2;

struct vsoc_region_data {
	char name[VSOC_DEVICE_NAME_SZ + 1];
	wait_queue_head_t interrupt_wait_queue;
	/* TODO(b/73664181): Use multiple futex wait queues */
	wait_queue_head_t futex_wait_queue;
	/* Flag indicating that an interrupt has been signalled by the host. */
	atomic_t *incoming_signalled;
	/* Flag indicating the guest has signalled the host. */
	atomic_t *outgoing_signalled;
	bool irq_requested;
	bool device_created;
};

struct vsoc_device {
	/* Kernel virtual address of REGISTER_BAR. */
	void __iomem *regs;
	/* Physical address of SHARED_MEMORY_BAR. */
	phys_addr_t shm_phys_start;
	/* Kernel virtual address of SHARED_MEMORY_BAR. */
	void __iomem *kernel_mapped_shm;
	/* Size of the entire shared memory window in bytes. */
	size_t shm_size;
	/*
	 * Pointer to the virtual address of the shared memory layout structure.
	 * This is probably identical to kernel_mapped_shm, but saving this
	 * here saves a lot of annoying casts.
	 */
	struct vsoc_shm_layout_descriptor *layout;
	/*
	 * Points to a table of region descriptors in the kernel's virtual
	 * address space. Calculated from
	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
	 */
	struct vsoc_device_region *regions;
	/* Head of a list of permissions that have been granted. */
	struct list_head permissions;
	struct pci_dev *dev;
	/* Per-region (and therefore per-interrupt) information. */
	struct vsoc_region_data *regions_data;
	/*
	 * Table of msi-x entries. This has to be separated from struct
	 * vsoc_region_data because the kernel deals with them as an array.
	 */
	struct msix_entry *msix_entries;
	/* Mutex that protects the permission list */
	struct mutex mtx;
	/* Major number assigned by the kernel */
	int major;
	/* Character device assigned by the kernel */
	struct cdev cdev;
	/* Device class assigned by the kernel */
	struct class *class;
	/*
	 * Flags that indicate what we've initialized. These are used to do an
	 * orderly cleanup of the device.
	 */
	bool enabled_device;
	bool requested_regions;
	bool cdev_added;
	bool class_added;
	bool msix_enabled;
};

static struct vsoc_device vsoc_dev;

/*
 * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
 */

struct fd_scoped_permission_node {
	struct fd_scoped_permission permission;
	struct list_head list;
};

struct vsoc_private_data {
	struct fd_scoped_permission_node *fd_scoped_permission_node;
};

static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
static int vsoc_mmap(struct file *, struct vm_area_struct *);
static int vsoc_open(struct inode *, struct file *);
static int vsoc_release(struct inode *, struct file *);
static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
static int do_create_fd_scoped_permission(
	struct vsoc_device_region *region_p,
	struct fd_scoped_permission_node *np,
	struct fd_scoped_permission_arg __user *arg);
static void do_destroy_fd_scoped_permission(
	struct vsoc_device_region *owner_region_p,
	struct fd_scoped_permission *perm);
static long do_vsoc_describe_region(struct file *,
				    struct vsoc_device_region __user *);
static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);

/**
 * Validate arguments on entry points to the driver.
 */
inline int vsoc_validate_inode(struct inode *inode)
{
	if (iminor(inode) >= vsoc_dev.layout->region_count) {
		dev_err(&vsoc_dev.dev->dev,
			"describe_region: invalid region %d\n", iminor(inode));
		return -ENODEV;
	}
	return 0;
}

inline int vsoc_validate_filep(struct file *filp)
{
	int ret = vsoc_validate_inode(file_inode(filp));

	if (ret)
		return ret;
	if (!filp->private_data) {
		dev_err(&vsoc_dev.dev->dev,
			"No private data on fd, region %d\n",
			iminor(file_inode(filp)));
		return -EBADFD;
	}
	return 0;
}

/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}

/* Converts from shared memory offset to physical address */
static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
{
	return vsoc_dev.shm_phys_start + offset;
}

/**
 * Convenience functions to obtain the region from the inode or file.
 * Dangerous to call before validating the inode/file.
 */
static inline struct vsoc_device_region *vsoc_region_from_inode(
	struct inode *inode)
{
	return &vsoc_dev.regions[iminor(inode)];
}

static inline struct vsoc_device_region *vsoc_region_from_filep(
	struct file *inode)
{
	return vsoc_region_from_inode(file_inode(inode));
}

static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
{
	return r->region_end_offset - r->region_begin_offset;
}

static const struct file_operations vsoc_ops = {
	.owner = THIS_MODULE,
	.open = vsoc_open,
	.mmap = vsoc_mmap,
	.read = vsoc_read,
	.unlocked_ioctl = vsoc_ioctl,
	.compat_ioctl = vsoc_ioctl,
	.write = vsoc_write,
	.llseek = vsoc_lseek,
	.release = vsoc_release,
};

static struct pci_device_id vsoc_id_table[] = {
	{0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0},
};

MODULE_DEVICE_TABLE(pci, vsoc_id_table);

static void vsoc_remove_device(struct pci_dev *pdev);
static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent);

static struct pci_driver vsoc_pci_driver = {
	.name = "vsoc",
	.id_table = vsoc_id_table,
	.probe = vsoc_probe_device,
	.remove = vsoc_remove_device,
};

static int do_create_fd_scoped_permission(
	struct vsoc_device_region *region_p,
	struct fd_scoped_permission_node *np,
	struct fd_scoped_permission_arg __user *arg)
{
	struct file *managed_filp;
	s32 managed_fd;
	atomic_t *owner_ptr = NULL;
	struct vsoc_device_region *managed_region_p;

	if (copy_from_user(&np->permission,
			   &arg->perm, sizeof(np->permission)) ||
	    copy_from_user(&managed_fd,
			   &arg->managed_region_fd, sizeof(managed_fd))) {
		return -EFAULT;
	}
	managed_filp = fdget(managed_fd).file;
	/* Check that it's a valid fd. */
	if (!managed_filp || vsoc_validate_filep(managed_filp))
		return -EPERM;
	/* EEXIST if the given fd already has a permission. */
	if (((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node)
		return -EEXIST;
	managed_region_p = vsoc_region_from_filep(managed_filp);
	/* Check that the provided region is managed by this one */
	if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
		return -EPERM;
	/* The area must be well formed and have non-zero size */
	if (np->permission.begin_offset >= np->permission.end_offset)
		return -EINVAL;
	/* The area must fit in the memory window */
	if (np->permission.end_offset >
	    vsoc_device_region_size(managed_region_p))
		return -ERANGE;
	/* The area must be in the region data section */
	if (np->permission.begin_offset <
	    managed_region_p->offset_of_region_data)
		return -ERANGE;
	/* The area must be page aligned */
	if (!PAGE_ALIGNED(np->permission.begin_offset) ||
	    !PAGE_ALIGNED(np->permission.end_offset))
		return -EINVAL;
	/* Owner offset must be naturally aligned in the window */
	if (np->permission.owner_offset &
	    (sizeof(np->permission.owner_offset) - 1))
		return -EINVAL;
	/* The owner flag must reside in the owner memory */
	if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
	    vsoc_device_region_size(region_p))
		return -ERANGE;
	/* The owner flag must reside in the data section */
	if (np->permission.owner_offset < region_p->offset_of_region_data)
		return -EINVAL;
	/* The owner value must change to claim the memory */
	if (np->permission.owned_value == VSOC_REGION_FREE)
		return -EINVAL;
	owner_ptr =
	    (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
						np->permission.owner_offset);
	/* We've already verified that this is in the shared memory window, so
	 * it should be safe to write to this address.
	 */
	if (atomic_cmpxchg(owner_ptr,
			   VSOC_REGION_FREE,
			   np->permission.owned_value) != VSOC_REGION_FREE) {
		return -EBUSY;
	}
	((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node = np;
	/* The file offset needs to be adjusted if the calling
	 * process did any read/write operations on the fd
	 * before creating the permission.
	 */
	if (managed_filp->f_pos) {
		if (managed_filp->f_pos > np->permission.end_offset) {
			/* If the offset is beyond the permission end, set it
			 * to the end.
			 */
			managed_filp->f_pos = np->permission.end_offset;
		} else {
			/* If the offset is within the permission interval,
			 * keep it there; otherwise reset it to zero.
			 */
			if (managed_filp->f_pos < np->permission.begin_offset) {
				managed_filp->f_pos = 0;
			} else {
				managed_filp->f_pos -=
				    np->permission.begin_offset;
			}
		}
	}
	return 0;
}
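
/*
 * Illustrative userspace sketch, not part of the driver: a process holding an
 * fd for the managing region can grant a page-aligned window of a managed
 * region's fd roughly as follows. The fd names and numeric offsets are
 * hypothetical; the ioctl and structure fields mirror the checks above
 * (offsets page aligned, inside the data section, and owned_value different
 * from VSOC_REGION_FREE).
 *
 *	struct fd_scoped_permission_arg arg = {0};
 *
 *	arg.managed_region_fd = managed_fd;
 *	arg.perm.begin_offset = 4096;
 *	arg.perm.end_offset = 8192;
 *	arg.perm.owner_offset = owner_word_offset;
 *	arg.perm.owned_value = my_owner_id;
 *	if (ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg) < 0)
 *		perror("VSOC_CREATE_FD_SCOPED_PERMISSION");
 *
 * The permission is torn down when the managed fd is released.
 */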

static void do_destroy_fd_scoped_permission_node(
	struct vsoc_device_region *owner_region_p,
	struct fd_scoped_permission_node *node)
{
	if (node) {
		do_destroy_fd_scoped_permission(owner_region_p,
						&node->permission);
		mutex_lock(&vsoc_dev.mtx);
		list_del(&node->list);
		mutex_unlock(&vsoc_dev.mtx);
		kfree(node);
	}
}

static void do_destroy_fd_scoped_permission(
		struct vsoc_device_region *owner_region_p,
		struct fd_scoped_permission *perm)
{
	atomic_t *owner_ptr = NULL;
	int prev = 0;

	if (!perm)
		return;
	owner_ptr = (atomic_t *)shm_off_to_virtual_addr(
		owner_region_p->region_begin_offset + perm->owner_offset);
	prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
	if (prev != perm->owned_value)
		dev_err(&vsoc_dev.dev->dev,
			"%x-%x: owner (%s) %x: expected to be %x was %x",
			perm->begin_offset, perm->end_offset,
			owner_region_p->device_name, perm->owner_offset,
			perm->owned_value, prev);
}

static long do_vsoc_describe_region(struct file *filp,
				    struct vsoc_device_region __user *dest)
{
	struct vsoc_device_region *region_p;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	if (copy_to_user(dest, region_p, sizeof(*region_p)))
		return -EFAULT;
	return 0;
}

/**
 * Implements the inner logic of cond_wait. Copies to and from userspace are
 * done in the helper function below.
 */
static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
{
	DEFINE_WAIT(wait);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
	struct hrtimer_sleeper timeout, *to = NULL;
	int ret = 0;
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	atomic_t *address = NULL;
	struct timespec ts;

	/* Ensure that the offset is aligned */
	if (arg->offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)arg->offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	address = shm_off_to_virtual_addr(region_p->region_begin_offset +
					  arg->offset);

	/* Ensure that the type of wait is valid */
	switch (arg->wait_type) {
	case VSOC_WAIT_IF_EQUAL:
		break;
	case VSOC_WAIT_IF_EQUAL_TIMEOUT:
		to = &timeout;
		break;
	default:
		return -EINVAL;
	}

	if (to) {
		/* Copy the user-supplied timespec into the kernel structure.
		 * We do things this way to flatten differences between 32 bit
		 * and 64 bit timespecs.
		 */
		ts.tv_sec = arg->wake_time_sec;
		ts.tv_nsec = arg->wake_time_nsec;

		if (!timespec_valid(&ts))
			return -EINVAL;
		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
					     current->timer_slack_ns);

		hrtimer_init_sleeper(to, current);
	}

	while (1) {
		prepare_to_wait(&data->futex_wait_queue, &wait,
				TASK_INTERRUPTIBLE);
		/*
		 * Check the sentinel value after prepare_to_wait. If the value
		 * changes after this check the writer will call signal,
		 * changing the task state from INTERRUPTIBLE to RUNNING. That
		 * will ensure that schedule() will eventually schedule this
		 * task.
		 */
		if (atomic_read(address) != arg->value) {
			ret = 0;
			break;
		}
		if (to) {
			hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
			if (likely(to->task))
				freezable_schedule();
			hrtimer_cancel(&to->timer);
			if (!to->task) {
				ret = -ETIMEDOUT;
				break;
			}
		} else {
			freezable_schedule();
		}
		/* Count the number of times that we woke up. This is useful
		 * for unit testing.
		 */
		++arg->wakes;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&data->futex_wait_queue, &wait);
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;
}

/**
 * Handles the details of copying from/to userspace to ensure that the copies
 * happen on all of the return paths of cond_wait.
 */
static int do_vsoc_cond_wait(struct file *filp,
			     struct vsoc_cond_wait __user *untrusted_in)
{
	struct vsoc_cond_wait arg;
	int rval = 0;

	if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
		return -EFAULT;
	/* wakes is an out parameter. Initialize it to something sensible. */
	arg.wakes = 0;
	rval = handle_vsoc_cond_wait(filp, &arg);
	if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
		return -EFAULT;
	return rval;
}

static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
{
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
	/* Ensure that the offset is aligned */
	if (offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	/*
	 * TODO(b/73664181): Use multiple futex wait queues.
	 * We need to wake every sleeper when the condition changes. Typically
	 * only a single thread will be waiting on the condition, but there
	 * are exceptions. The worst case is about 10 threads.
	 */
	wake_up_interruptible_all(&data->futex_wait_queue);
	return 0;
}
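
/*
 * Illustrative userspace sketch, not part of the driver: together the two
 * calls above give a region a futex-like wait/wake primitive on a 4-byte,
 * 4-byte-aligned word in shared memory. The fd and offset below are
 * hypothetical; the structure fields and wait types are the ones handled
 * above.
 *
 *	struct vsoc_cond_wait w = {0};
 *
 *	w.offset = sync_word_offset;
 *	w.value = expected_value;		// sleep while the word equals this
 *	w.wait_type = VSOC_WAIT_IF_EQUAL;	// or VSOC_WAIT_IF_EQUAL_TIMEOUT
 *	ioctl(region_fd, VSOC_COND_WAIT, &w);	// w.wakes counts the wakeups
 *
 * A peer that changes the word then wakes any sleepers with:
 *
 *	ioctl(region_fd, VSOC_COND_WAKE, sync_word_offset);
 */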

static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rv = 0;
	struct vsoc_device_region *region_p;
	u32 reg_num;
	struct vsoc_region_data *reg_data;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	reg_num = iminor(file_inode(filp));
	reg_data = vsoc_dev.regions_data + reg_num;
	switch (cmd) {
	case VSOC_CREATE_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node = NULL;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			/* We can't allocate memory for the permission */
			if (!node)
				return -ENOMEM;
			INIT_LIST_HEAD(&node->list);
			rv = do_create_fd_scoped_permission(
				region_p,
				node,
				(struct fd_scoped_permission_arg __user *)arg);
			if (!rv) {
				mutex_lock(&vsoc_dev.mtx);
				list_add(&node->list, &vsoc_dev.permissions);
				mutex_unlock(&vsoc_dev.mtx);
			} else {
				kfree(node);
				return rv;
			}
		}
		break;

	case VSOC_GET_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node =
			    ((struct vsoc_private_data *)filp->private_data)->
			    fd_scoped_permission_node;
			if (!node)
				return -ENOENT;
			if (copy_to_user
			    ((struct fd_scoped_permission __user *)arg,
			     &node->permission, sizeof(node->permission)))
				return -EFAULT;
		}
		break;

	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
		if (!atomic_xchg(
			    reg_data->outgoing_signalled,
			    1)) {
			writel(reg_num, vsoc_dev.regs + DOORBELL);
			return 0;
		} else {
			return -EBUSY;
		}
		break;

	case VSOC_SEND_INTERRUPT_TO_HOST:
		writel(reg_num, vsoc_dev.regs + DOORBELL);
		return 0;

	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
		wait_event_interruptible(
			reg_data->interrupt_wait_queue,
			(atomic_read(reg_data->incoming_signalled) != 0));
		break;

	case VSOC_DESCRIBE_REGION:
		return do_vsoc_describe_region(
			filp,
			(struct vsoc_device_region __user *)arg);

	case VSOC_SELF_INTERRUPT:
		atomic_set(reg_data->incoming_signalled, 1);
		wake_up_interruptible(&reg_data->interrupt_wait_queue);
		break;

	case VSOC_COND_WAIT:
		return do_vsoc_cond_wait(filp,
					 (struct vsoc_cond_wait __user *)arg);
	case VSOC_COND_WAKE:
		return do_vsoc_cond_wake(filp, arg);

	default:
		return -EINVAL;
	}
	return 0;
}
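
/*
 * Sketch of the interrupt handshake driven by the ioctls above, stated here
 * only as a reading aid: VSOC_MAYBE_SEND_INTERRUPT_TO_HOST rings the doorbell
 * only when outgoing_signalled was clear (otherwise it returns -EBUSY),
 * VSOC_SEND_INTERRUPT_TO_HOST always rings it, and
 * VSOC_WAIT_FOR_INCOMING_INTERRUPT sleeps until incoming_signalled becomes
 * non-zero (VSOC_SELF_INTERRUPT sets it locally). A hypothetical guest-side
 * request/response loop would therefore be:
 *
 *	ioctl(region_fd, VSOC_MAYBE_SEND_INTERRUPT_TO_HOST, 0);
 *	ioctl(region_fd, VSOC_WAIT_FOR_INCOMING_INTERRUPT, 0);
 */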

static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
			 loff_t *poffset)
{
	__u32 area_off;
	const void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_to_user(buffer, area_p, len))
		return -EFAULT;
	*poffset += len;
	return len;
}

static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
{
	ssize_t area_len = 0;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, NULL);
	switch (origin) {
	case SEEK_SET:
		break;

	case SEEK_CUR:
		if (offset > 0 && offset + filp->f_pos < 0)
			return -EOVERFLOW;
		offset += filp->f_pos;
		break;

	case SEEK_END:
		if (offset > 0 && offset + area_len < 0)
			return -EOVERFLOW;
		offset += area_len;
		break;

	case SEEK_DATA:
		if (offset >= area_len)
			return -EINVAL;
		if (offset < 0)
			offset = 0;
		break;

	case SEEK_HOLE:
		/* Next hole is always the end of the region, unless offset is
		 * beyond that
		 */
		if (offset < area_len)
			offset = area_len;
		break;

	default:
		return -EINVAL;
	}

	if (offset < 0 || offset > area_len)
		return -EINVAL;
	filp->f_pos = offset;

	return offset;
}

static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
			  size_t len, loff_t *poffset)
{
	__u32 area_off;
	void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_from_user(area_p, buffer, len))
		return -EFAULT;
	*poffset += len;
	return len;
}

static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
{
	struct vsoc_region_data *region_data =
	    (struct vsoc_region_data *)region_data_v;
	int reg_num = region_data - vsoc_dev.regions_data;

	if (unlikely(!region_data))
		return IRQ_NONE;

	if (unlikely(reg_num < 0 ||
		     reg_num >= vsoc_dev.layout->region_count)) {
		dev_err(&vsoc_dev.dev->dev,
			"invalid irq @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
		dev_err(&vsoc_dev.dev->dev,
			"irq not aligned @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	wake_up_interruptible(&region_data->interrupt_wait_queue);
	return IRQ_HANDLED;
}

static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int result;
	int i;
	resource_size_t reg_size;
	dev_t devt;

	vsoc_dev.dev = pdev;
	result = pci_enable_device(pdev);
	if (result) {
		dev_err(&pdev->dev,
			"pci_enable_device failed %s: error %d\n",
			pci_name(pdev), result);
		return result;
	}
	vsoc_dev.enabled_device = true;
	result = pci_request_regions(pdev, "vsoc");
	if (result < 0) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.requested_regions = true;
	/* Set up the control registers in BAR 0 */
	reg_size = pci_resource_len(pdev, REGISTER_BAR);
	if (reg_size > MAX_REGISTER_BAR_LEN)
		vsoc_dev.regs =
		    pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
	else
		vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);

	if (!vsoc_dev.regs) {
		dev_err(&pdev->dev,
			"cannot map registers of size %zu\n",
			(size_t)reg_size);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}

	/* Map the shared memory in BAR 2 */
	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);

	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
	if (!vsoc_dev.kernel_mapped_shm) {
		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}

	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
				vsoc_dev.kernel_mapped_shm;
	dev_info(&pdev->dev, "major_version: %d\n",
		 vsoc_dev.layout->major_version);
	dev_info(&pdev->dev, "minor_version: %d\n",
		 vsoc_dev.layout->minor_version);
	dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
	dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
	if (vsoc_dev.layout->major_version !=
	    CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
		dev_err(&vsoc_dev.dev->dev,
			"driver supports only major_version %d\n",
			CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
				     VSOC_DEV_NAME);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.major = MAJOR(devt);
	cdev_init(&vsoc_dev.cdev, &vsoc_ops);
	vsoc_dev.cdev.owner = THIS_MODULE;
	result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.cdev_added = true;
	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
	if (IS_ERR(vsoc_dev.class)) {
		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
		vsoc_remove_device(pdev);
		return PTR_ERR(vsoc_dev.class);
	}
	vsoc_dev.class_added = true;
	vsoc_dev.regions = (struct vsoc_device_region __force *)
		((void *)vsoc_dev.layout +
		 vsoc_dev.layout->vsoc_region_desc_offset);
	vsoc_dev.msix_entries = kcalloc(
			vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
	if (!vsoc_dev.msix_entries) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate msix_entries\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	vsoc_dev.regions_data = kcalloc(
			vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
	if (!vsoc_dev.regions_data) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate regions' data\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	for (i = 0; i < vsoc_dev.layout->region_count; ++i)
		vsoc_dev.msix_entries[i].entry = i;

	result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
				       vsoc_dev.layout->region_count);
	if (result) {
		dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	/* Check that all regions are well formed */
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;

		if (!PAGE_ALIGNED(region->region_begin_offset) ||
		    !PAGE_ALIGNED(region->region_end_offset)) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d not aligned (%x:%x)", i,
				region->region_begin_offset,
				region->region_end_offset);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->region_begin_offset >= region->region_end_offset ||
		    region->region_end_offset > vsoc_dev.shm_size) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d offsets are wrong: %x %x %zx",
				i, region->region_begin_offset,
				region->region_end_offset, vsoc_dev.shm_size);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->managed_by >= vsoc_dev.layout->region_count) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d has invalid owner: %u",
				i, region->managed_by);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
	}
	vsoc_dev.msix_enabled = true;
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;
		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
		const struct vsoc_signal_table_layout *h_to_g_signal_table =
			&region->host_to_guest_signal_table;
		const struct vsoc_signal_table_layout *g_to_h_signal_table =
			&region->guest_to_host_signal_table;

		vsoc_dev.regions_data[i].name[name_sz] = '\0';
		memcpy(vsoc_dev.regions_data[i].name, region->device_name,
		       name_sz);
		dev_info(&pdev->dev, "region %d name=%s\n",
			 i, vsoc_dev.regions_data[i].name);
		init_waitqueue_head(
				&vsoc_dev.regions_data[i].interrupt_wait_queue);
		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
		vsoc_dev.regions_data[i].incoming_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			h_to_g_signal_table->interrupt_signalled_offset;
		vsoc_dev.regions_data[i].outgoing_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			g_to_h_signal_table->interrupt_signalled_offset;
		result = request_irq(
				vsoc_dev.msix_entries[i].vector,
				vsoc_interrupt, 0,
				vsoc_dev.regions_data[i].name,
				vsoc_dev.regions_data + i);
		if (result) {
			dev_info(&pdev->dev,
				 "request_irq failed irq=%d vector=%d\n",
				 i, vsoc_dev.msix_entries[i].vector);
			vsoc_remove_device(pdev);
			return -ENOSPC;
		}
		vsoc_dev.regions_data[i].irq_requested = true;
		if (!device_create(vsoc_dev.class, NULL,
				   MKDEV(vsoc_dev.major, i),
				   NULL, vsoc_dev.regions_data[i].name)) {
			dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
			vsoc_remove_device(pdev);
			return -EBUSY;
		}
		vsoc_dev.regions_data[i].device_created = true;
	}
	return 0;
}

/*
 * This should undo all of the allocations in the probe function in reverse
 * order.
 *
 * Notes:
 *
 *   The device may have been partially initialized, so double check
 *   that the allocations happened.
 *
 *   This function may be called multiple times, so mark resources as freed
 *   as they are deallocated.
 */
static void vsoc_remove_device(struct pci_dev *pdev)
{
	int i;
	/*
	 * pdev is the first thing to be set on probe and the last thing
	 * to be cleared here. If it's NULL then there is no cleanup.
	 */
	if (!pdev || !vsoc_dev.dev)
		return;
	dev_info(&pdev->dev, "remove_device\n");
	if (vsoc_dev.regions_data) {
		for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
			if (vsoc_dev.regions_data[i].device_created) {
				device_destroy(vsoc_dev.class,
					       MKDEV(vsoc_dev.major, i));
				vsoc_dev.regions_data[i].device_created = false;
			}
			if (vsoc_dev.regions_data[i].irq_requested)
				free_irq(vsoc_dev.msix_entries[i].vector, NULL);
			vsoc_dev.regions_data[i].irq_requested = false;
		}
		kfree(vsoc_dev.regions_data);
		vsoc_dev.regions_data = NULL;
	}
	if (vsoc_dev.msix_enabled) {
		pci_disable_msix(pdev);
		vsoc_dev.msix_enabled = false;
	}
	kfree(vsoc_dev.msix_entries);
	vsoc_dev.msix_entries = NULL;
	vsoc_dev.regions = NULL;
	if (vsoc_dev.class_added) {
		class_destroy(vsoc_dev.class);
		vsoc_dev.class_added = false;
	}
	if (vsoc_dev.cdev_added) {
		cdev_del(&vsoc_dev.cdev);
		vsoc_dev.cdev_added = false;
	}
	if (vsoc_dev.major && vsoc_dev.layout) {
		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
					 vsoc_dev.layout->region_count);
		vsoc_dev.major = 0;
	}
	vsoc_dev.layout = NULL;
	if (vsoc_dev.kernel_mapped_shm) {
		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
		vsoc_dev.kernel_mapped_shm = NULL;
	}
	if (vsoc_dev.regs) {
		pci_iounmap(pdev, vsoc_dev.regs);
		vsoc_dev.regs = NULL;
	}
	if (vsoc_dev.requested_regions) {
		pci_release_regions(pdev);
		vsoc_dev.requested_regions = false;
	}
	if (vsoc_dev.enabled_device) {
		pci_disable_device(pdev);
		vsoc_dev.enabled_device = false;
	}
	/* Do this last: it indicates that the device is not initialized. */
	vsoc_dev.dev = NULL;
}

static void __exit vsoc_cleanup_module(void)
{
	vsoc_remove_device(vsoc_dev.dev);
	pci_unregister_driver(&vsoc_pci_driver);
}

static int __init vsoc_init_module(void)
{
	int err = -ENOMEM;

	INIT_LIST_HEAD(&vsoc_dev.permissions);
	mutex_init(&vsoc_dev.mtx);

	err = pci_register_driver(&vsoc_pci_driver);
	if (err < 0)
		return err;
	return 0;
}

static int vsoc_open(struct inode *inode, struct file *filp)
{
	/* Can't use vsoc_validate_filep because filp is still incomplete */
	int ret = vsoc_validate_inode(inode);

	if (ret)
		return ret;
	filp->private_data =
		kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;
	return 0;
}

static int vsoc_release(struct inode *inode, struct file *filp)
{
	struct vsoc_private_data *private_data = NULL;
	struct fd_scoped_permission_node *node = NULL;
	struct vsoc_device_region *owner_region_p = NULL;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	private_data = (struct vsoc_private_data *)filp->private_data;
	if (!private_data)
		return 0;

	node = private_data->fd_scoped_permission_node;
	if (node) {
		owner_region_p = vsoc_region_from_inode(inode);
		if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
			owner_region_p =
			    &vsoc_dev.regions[owner_region_p->managed_by];
		}
		do_destroy_fd_scoped_permission_node(owner_region_p, node);
		private_data->fd_scoped_permission_node = NULL;
	}
	kfree(private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * Returns the device relative offset and length of the area specified by the
 * fd scoped permission. If there is no fd scoped permission set, a default
 * permission covering the entire region is assumed, unless the region is owned
 * by another one, in which case the default is a permission with zero size.
 */
static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
{
	__u32 off = 0;
	ssize_t length = 0;
	struct vsoc_device_region *region_p;
	struct fd_scoped_permission *perm;

	region_p = vsoc_region_from_filep(filp);
	off = region_p->region_begin_offset;
	perm = &((struct vsoc_private_data *)filp->private_data)->
		fd_scoped_permission_node->permission;
	if (perm) {
		off += perm->begin_offset;
		length = perm->end_offset - perm->begin_offset;
	} else if (region_p->managed_by == VSOC_REGION_WHOLE) {
		/* No permission set and the region is not owned by another,
		 * default to full region access.
		 */
		length = vsoc_device_region_size(region_p);
	} else {
		/* return zero length, access is denied. */
		length = 0;
	}
	if (area_offset)
		*area_offset = off;
	return length;
}

static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	__u32 area_off;
	phys_addr_t mem_off;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	/* Add the requested offset */
	area_off += (vma->vm_pgoff << PAGE_SHIFT);
	area_len -= (vma->vm_pgoff << PAGE_SHIFT);
	if (area_len < len)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	mem_off = shm_off_to_phys_addr(area_off);
	if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
			       len, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
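
/*
 * Illustrative userspace sketch, not part of the driver: mmap() on a region
 * device maps the window computed by vsoc_get_area(), i.e. the fd-scoped
 * permission if one is attached to the fd, otherwise the whole region (or
 * nothing if the region is managed by another). The device path is
 * hypothetical; the SEEK_END call relies on vsoc_lseek() above returning the
 * size of the accessible area.
 *
 *	int fd = open("/dev/example_region", O_RDWR);
 *	off_t len = lseek(fd, 0, SEEK_END);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, 0);
 */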

module_init(vsoc_init_module);
module_exit(vsoc_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
MODULE_VERSION("1.0");