• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* drivers/misc/qemupipe/qemu_pipe.c
2  *
3  * Copyright (C) 2011 Google, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 
16 /* This source file contains the implementation of a special device driver
17  * that intends to provide a *very* fast communication channel between the
18  * guest system and the QEMU emulator.
19  *
20  * Usage from the guest is simply the following (error handling simplified):
21  *
22  *    int  fd = open("/dev/qemu_pipe",O_RDWR);
23  *    .... write() or read() through the pipe.
24  *
25  * This driver doesn't deal with the exact protocol used during the session.
26  * It is intended to be as simple as something like:
27  *
28  *    // do this _just_ after opening the fd to connect to a specific
29  *    // emulator service.
30  *    const char*  msg = "<pipename>";
31  *    if (write(fd, msg, strlen(msg)+1) < 0) {
32  *       ... could not connect to <pipename> service
33  *       close(fd);
34  *    }
35  *
36  *    // after this, simply read() and write() to communicate with the
37  *    // service. Exact protocol details left as an exercise to the reader.
38  *
39  * This driver is very fast because it doesn't copy any data through
40  * intermediate buffers, since the emulator is capable of translating
41  * guest user addresses into host ones.
42  *
43  * Note that we must however ensure that each user page involved in the
44  * exchange is properly mapped during a transfer.
45  */
46 
47 #include <linux/module.h>
48 #include <linux/interrupt.h>
49 #include <linux/kernel.h>
50 #include <linux/spinlock.h>
51 #include <linux/miscdevice.h>
52 #include <linux/platform_device.h>
53 #include <linux/poll.h>
54 #include <linux/radix-tree.h>
55 #include <linux/sched.h>
56 #include <linux/bitops.h>
57 #include <linux/io.h>
58 #include <linux/slab.h>
59 #include <linux/mm.h>
60 
61 /* Set to 1 for normal debugging, and 2 for extensive one */
62 #define PIPE_DEBUG  0
63 
64 #define PIPE_E(...) printk(KERN_ERR "QEMU Pipe Device:"  __VA_ARGS__)
65 #define PIPE_W(...) printk(KERN_WARNING "QEMU Pipe Device:"  __VA_ARGS__)
66 
67 #if PIPE_DEBUG >= 1
68 #  define  PIPE_D(...)  printk(KERN_INFO "QEMU Pipe Device:"  __VA_ARGS__)
69 #else
70 #  define  PIPE_D(...)  do {} while (0)
71 #endif
72 
73 #if PIPE_DEBUG >= 2
74 #  define  PIPE_DD(...)  printk(KERN_INFO "QEMU Pipe Device:" __VA_ARGS__)
75 #else
76 #  define  PIPE_DD(...)  do {} while (0)
77 #endif
78 
79 /* IMPORTANT: The following constants must match the ones used and defined
80  * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
81  */
82 
83 /* pipe device registers */
84 #define PIPE_REG_COMMAND            0x00  /* write: value = command */
85 #define PIPE_REG_STATUS             0x04  /* read */
86 #define PIPE_REG_CHANNEL            0x08  /* read/write: channel id */
87 #define PIPE_REG_SIZE               0x0c  /* read/write: buffer size */
88 #define PIPE_REG_ADDRESS            0x10  /* write: physical address */
89 #define PIPE_REG_WAKES              0x14  /* read: wake flags */
90 #define PIPE_REG_PARAMS_ADDR_LOW    0x18  /* read/write: batch data address */
91 #define PIPE_REG_PARAMS_ADDR_HIGH   0x1c  /* read/write: batch data address */
92 #define PIPE_REG_ACCESS_PARAMS      0x20  /* write: batch access */
93 #define PIPE_REG_VERSION            0x24  /* read: device version */
94 
95 /* list of commands for PIPE_REG_COMMAND */
96 #define CMD_OPEN               1  /* open new channel */
97 #define CMD_CLOSE              2  /* close channel (from guest) */
98 #define CMD_POLL               3  /* poll read/write status */
99 
100 /* List of bitflags returned in status of CMD_POLL command */
101 #define PIPE_POLL_IN   (1 << 0)
102 #define PIPE_POLL_OUT  (1 << 1)
103 #define PIPE_POLL_HUP  (1 << 2)
104 
105 /* The following commands are related to write operations */
106 #define CMD_WRITE_BUFFER       4  /* send a user buffer to the emulator */
107 #define CMD_WAKE_ON_WRITE      5  /* tell the emulator to wake us when writing
108 				     is possible */
109 
110 /* The following commands are related to read operations, they must be
111  * listed in the same order than the corresponding write ones, since we
112  * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
113  * in qemu_pipe_read_write() below.
114  */
115 #define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
116 #define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
117 				   * is possible */
118 
119 /* Possible status values used to signal errors - see qemu_pipe_error_convert */
120 #define PIPE_ERROR_INVAL       -1
121 #define PIPE_ERROR_AGAIN       -2
122 #define PIPE_ERROR_NOMEM       -3
123 #define PIPE_ERROR_IO          -4
124 
125 /* Bit-flags used to signal events from the emulator */
126 #define PIPE_WAKE_CLOSED       (1 << 0)  /* emulator closed pipe */
127 #define PIPE_WAKE_READ         (1 << 1)  /* pipe can now be read from */
128 #define PIPE_WAKE_WRITE        (1 << 2)  /* pipe can now be written to */
129 
/* Batch-access parameter block shared with the emulator.
 * The guest fills in the fields, then writes the command to
 * PIPE_REG_ACCESS_PARAMS; the emulator stores the transfer status into
 * 'result' (see access_with_param() / setup_access_params_addr()).
 */
struct access_params{
	uint32_t channel;   /* pipe id: the guest qemu_pipe pointer value */
	uint32_t size;      /* number of bytes to transfer */
	uint32_t address;   /* buffer address (virt or phys, per dev version) */
	uint32_t cmd;       /* CMD_READ_BUFFER or CMD_WRITE_BUFFER */
	uint32_t result;    /* transfer status, written back by the host */
	/* reserved for future extension */
	uint32_t flags;
};
139 
140 /* The global driver data. Holds a reference to the i/o page used to
141  * communicate with the emulator, and a wake queue for blocked tasks
142  * waiting to be awoken.
143  */
struct qemu_pipe_dev {
	spinlock_t lock;              /* guards register accesses + pipe set */
	unsigned char __iomem *base;  /* mapped i/o register page */
	struct access_params *aps;    /* batch block; NULL if unsupported */
	int irq;                      /* IRQ line from the platform resource */
	struct radix_tree_root pipes; /* set of live pipe pointers, keyed by
	                               * the pipe's own address (channel id) */
	u32 version;                  /* value read from PIPE_REG_VERSION */
};
152 
153 static struct qemu_pipe_dev   pipe_dev[1];
154 
155 /* This data type models a given pipe instance */
struct qemu_pipe {
	struct qemu_pipe_dev *dev;    /* owning device (the single pipe_dev) */
	struct mutex lock;            /* serializes read/write on this pipe */
	unsigned long flags;          /* BIT_* flags below, atomic bitops */
	wait_queue_head_t wake_queue; /* tasks blocked until a host wake irq */
};
162 
163 
/* Bit flags for the 'flags' field of struct qemu_pipe. Manipulated only
 * through atomic set_bit/clear_bit/test_bit so the interrupt handler and
 * sleeping readers/writers can share them without extra locking.
 */
enum {
	BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,  /* want to be waked on writes */
	BIT_WAKE_ON_READ   = 2,  /* want to be waked on reads */
};
170 
171 /* This function converts an error code returned by the emulator through
172  * the PIPE_REG_STATUS i/o register into a valid negative errno value.
173  */
qemu_pipe_error_convert(int status)174 static int qemu_pipe_error_convert(int status)
175 {
176 	switch (status) {
177 	case PIPE_ERROR_AGAIN:
178 		status = -EAGAIN; break;
179 	case PIPE_ERROR_NOMEM:
180 		status = -ENOMEM; break;
181 	case PIPE_ERROR_IO:
182 		status = -EIO; break;
183 	default:
184 		status = -EINVAL;
185 	}
186 	return status;
187 }
188 
189 /*
190  * Notice: QEMU will return 0 for unknown register access, indicating
191  * param_acess is not supported
192  */
valid_batchbuffer_addr(struct qemu_pipe_dev * dev,struct access_params * aps)193 static int valid_batchbuffer_addr(struct qemu_pipe_dev *dev,
194 				  struct access_params *aps)
195 {
196 	uint32_t aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
197 	uint32_t apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
198 	uint64_t paddr = ((uint64_t)aph << 32) | apl;
199 
200 	return paddr == (__pa(aps));
201 }
202 
203 /* 0 on success */
setup_access_params_addr(struct qemu_pipe_dev * dev)204 static int setup_access_params_addr(struct qemu_pipe_dev *dev)
205 {
206 	uint64_t paddr;
207 	struct access_params *aps;
208 
209 	aps = kmalloc(sizeof(struct access_params), GFP_KERNEL);
210 	if (!aps)
211 		return -1;
212 
213 	paddr = __pa(aps);
214 	writel((uint32_t)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
215 	writel((uint32_t)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
216 
217 	if (!valid_batchbuffer_addr(dev, aps))
218 		return -1;
219 
220 	dev->aps = aps;
221 	return 0;
222 }
223 
224 /* A value that will not be set by qemu emulator */
225 #define IMPOSSIBLE_BATCH_RESULT (0xdeadbeaf)
226 
access_with_param(struct qemu_pipe_dev * dev,const int cmd,unsigned long address,unsigned long avail,struct qemu_pipe * pipe,int * status)227 static int access_with_param(struct qemu_pipe_dev *dev, const int cmd,
228 			     unsigned long address, unsigned long avail,
229 			     struct qemu_pipe *pipe, int *status)
230 {
231 	struct access_params *aps = dev->aps;
232 
233 	aps->result = IMPOSSIBLE_BATCH_RESULT;
234 	aps->channel = (unsigned long)pipe;
235 	aps->size = avail;
236 	aps->address = address;
237 	aps->cmd = cmd;
238 	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
239 
240 	/* If aps->result unchanged, then batch command failed */
241 	if (aps->result == IMPOSSIBLE_BATCH_RESULT)
242 		return -1;
243 
244 	*status = aps->result;
245 	return 0;
246 }
247 
248 /* This function is used for both reading from and writing to a given
249  * pipe.
250  */
qemu_pipe_read_write(struct file * filp,char __user * buffer,size_t bufflen,int is_write)251 static ssize_t qemu_pipe_read_write(struct file *filp, char __user *buffer,
252 				    size_t bufflen, int is_write)
253 {
254 	unsigned long irq_flags;
255 	struct qemu_pipe *pipe = filp->private_data;
256 	struct qemu_pipe_dev *dev = pipe->dev;
257 	const int cmd_offset = is_write ? 0
258 					: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
259 	unsigned long address, address_end;
260 	int count = 0, ret = -EINVAL;
261 
262 	/* If the emulator already closed the pipe, no need to go further */
263 	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) {
264 		PIPE_W("(write=%d) already closed!\n", is_write);
265 		ret = -EIO;
266 		goto out;
267 	}
268 
269 	/* Null reads or writes succeeds */
270 	if (unlikely(bufflen) == 0)
271 		goto out;
272 
273 	/* Check the buffer range for access */
274 	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
275 			buffer, bufflen)) {
276 		ret = -EFAULT;
277 		PIPE_W("rw access_ok failed\n");
278 		goto out;
279 	}
280 
281 	/* Serialize access to the pipe */
282 	if (mutex_lock_interruptible(&pipe->lock)) {
283 		PIPE_W("(write=%d) interrupted!\n", is_write);
284 		return -ERESTARTSYS;
285 	}
286 
287 	address = (unsigned long)(void *)buffer;
288 	address_end = address + bufflen;
289 
290 	while (address < address_end) {
291 		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
292 		unsigned long next     = page_end < address_end ? page_end
293 								: address_end;
294 		unsigned long avail    = next - address;
295 		int status, wakeBit;
296 		struct page *page;
297 
298 		/* Either vaddr or paddr depending on the device version */
299 		unsigned long xaddr;
300 
301 		/*
302 		 * We grab the pages on a page-by-page basis in case user
303 		 * space gives us a potentially huge buffer but the read only
304 		 * returns a small amount, then there's no need to pin that
305 		 * much memory to the process.
306 		 */
307 		down_read(&current->mm->mmap_sem);
308 		ret = get_user_pages(current, current->mm, address, 1,
309 				     !is_write, 0, &page, NULL);
310 		up_read(&current->mm->mmap_sem);
311 		if (ret < 0)
312 			return ret;
313 
314 		if (dev->version) {
315 			/* Device version 1 or newer
316 			 * expects the physical address.
317 			 */
318 			xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
319 		} else {
320 			/* Device version 0 expects the
321 			 * virtual address.
322 			 */
323 			xaddr = address;
324 		}
325 
326 		/* Now, try to transfer the bytes in the current page */
327 		spin_lock_irqsave(&dev->lock, irq_flags);
328 		if (dev->aps == NULL || access_with_param(
329 			dev, CMD_WRITE_BUFFER + cmd_offset, xaddr, avail,
330 			pipe, &status) < 0)
331 		{
332 			writel((unsigned long)pipe,
333 				dev->base + PIPE_REG_CHANNEL);
334 			writel(avail, dev->base + PIPE_REG_SIZE);
335 			writel(xaddr, dev->base + PIPE_REG_ADDRESS);
336 			writel(CMD_WRITE_BUFFER + cmd_offset,
337 				dev->base + PIPE_REG_COMMAND);
338 			status = readl(dev->base + PIPE_REG_STATUS);
339 		}
340 		spin_unlock_irqrestore(&dev->lock, irq_flags);
341 
342 		if (status > 0 && !is_write)
343 			set_page_dirty(page);
344 		put_page(page);
345 
346 		if (status > 0) { /* Correct transfer */
347 			count += status;
348 			address += status;
349 			continue;
350 		} else if (status == 0) { /* EOF */
351 			ret = 0;
352 			break;
353 		} else if (status < 0 && count > 0) {
354 			/*
355 			 * An error occured and we already transfered
356 			 * something on one of the previous pages.
357 			 * Just return what we already copied and log this
358 			 * err.
359 			 *
360 			 * Note: This seems like an incorrect approach but
361 			 * cannot change it until we check if any user space
362 			 * ABI relies on this behavior.
363 			 */
364 			if (status != PIPE_ERROR_AGAIN)
365 				PIPE_W("goldfish_pipe: backend returned error %d on %s\n",
366 					    status, is_write ? "write" : "read");
367 			ret = 0;
368 			break;
369 		}
370 
371 		/* If the error is not PIPE_ERROR_AGAIN, or if we are not in
372 		* non-blocking mode, just return the error code.
373 		*/
374 		if (status != PIPE_ERROR_AGAIN ||
375 			(filp->f_flags & O_NONBLOCK) != 0) {
376 			ret = qemu_pipe_error_convert(status);
377 			break;
378 		}
379 
380 		/* We will have to wait until more data/space is available.
381 		* First, mark the pipe as waiting for a specific wake signal.
382 		*/
383 		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
384 		set_bit(wakeBit, &pipe->flags);
385 
386 		/* Tell the emulator we're going to wait for a wake event */
387 		spin_lock_irqsave(&dev->lock, irq_flags);
388 		writel((unsigned long)pipe, dev->base + PIPE_REG_CHANNEL);
389 		writel(CMD_WAKE_ON_WRITE + cmd_offset,
390 			dev->base + PIPE_REG_COMMAND);
391 		spin_unlock_irqrestore(&dev->lock, irq_flags);
392 
393 		/* Unlock the pipe, then wait for the wake signal */
394 		mutex_unlock(&pipe->lock);
395 
396 		while (test_bit(wakeBit, &pipe->flags)) {
397 			if (wait_event_interruptible(
398 					pipe->wake_queue,
399 					!test_bit(wakeBit, &pipe->flags))) {
400 				ret = -ERESTARTSYS;
401 				PIPE_W("rw, wait_event error\n");
402 				goto out;
403 			}
404 
405 			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) {
406 				ret = -EIO;
407 				PIPE_W("rw, pipe already closed\n");
408 				goto out;
409 			}
410 		}
411 
412 		/* Try to re-acquire the lock */
413 		if (mutex_lock_interruptible(&pipe->lock)) {
414 			ret = -ERESTARTSYS;
415 			break;
416 		}
417 	}
418 	mutex_unlock(&pipe->lock);
419 out:
420 	if (ret < 0)
421 		return ret;
422 	else
423 		return count;
424 }
425 
qemu_pipe_read(struct file * filp,char __user * buffer,size_t bufflen,loff_t * ppos)426 static ssize_t qemu_pipe_read(struct file *filp, char __user *buffer,
427 			      size_t bufflen, loff_t *ppos)
428 {
429 	return qemu_pipe_read_write(filp, buffer, bufflen, 0);
430 }
431 
qemu_pipe_write(struct file * filp,const char __user * buffer,size_t bufflen,loff_t * ppos)432 static ssize_t qemu_pipe_write(struct file *filp,
433 				const char __user *buffer, size_t bufflen,
434 				loff_t *ppos)
435 {
436 	return qemu_pipe_read_write(filp, (char __user *)buffer, bufflen, 1);
437 }
438 
439 
/* poll(2)/select(2) entry point: ask the emulator for the pipe's current
 * readiness via CMD_POLL and translate the PIPE_POLL_* flags into
 * poll event bits.
 */
static unsigned int qemu_pipe_poll(struct file *filp, poll_table *wait)
{
	struct qemu_pipe *pipe = filp->private_data;
	struct qemu_pipe_dev *dev = pipe->dev;
	unsigned long irq_flags;
	unsigned int mask = 0;
	int status;

	mutex_lock(&pipe->lock);
	poll_wait(filp, &pipe->wake_queue, wait);

	/* Query the host for the pipe's readiness flags */
	spin_lock_irqsave(&dev->lock, irq_flags);
	writel((unsigned long)pipe, dev->base + PIPE_REG_CHANNEL);
	writel(CMD_POLL, dev->base + PIPE_REG_COMMAND);
	status = readl(dev->base + PIPE_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, irq_flags);
	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}
474 
/* Shared IRQ handler. The emulator queues a list of (channel, wake-flags)
 * pairs before raising the interrupt; each read of PIPE_REG_CHANNEL pops
 * one entry, and a channel value of 0 terminates the list.
 */
static irqreturn_t qemu_pipe_interrupt(int irq, void *dev_id)
{
	struct qemu_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;  /* wake events handled; 0 means "not our irq" */

	/* We're going to read from the emulator a list of (channel,flags)
	* pairs corresponding to the wake events that occured on each
	* blocked pipe (i.e. channel).
	*/
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct qemu_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe  = (struct qemu_pipe *)(ptrdiff_t)channel;

		/* check if pipe is still valid: the channel id is the pipe's
		 * own address, so a successful lookup proves it is still
		 * registered and has not been freed by release().
		 */
		if ((pipe = radix_tree_lookup(&dev->pipes,
			(unsigned long)pipe)) == NULL) {
			PIPE_W("interrupt for already closed pipe\n");
			/* NOTE(review): this 'break' abandons any remaining
			 * entries in the host's list; 'continue' may be the
			 * intent — confirm against the emulator protocol.
			 */
			break;
		}
		/* Did the emulator just closed a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			/* a closed pipe unblocks both readers and writers */
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}
522 
qemu_pipe_open(struct inode * inode,struct file * file)523 static int qemu_pipe_open(struct inode *inode, struct file *file)
524 {
525 	unsigned long irq_flags;
526 	struct qemu_pipe *pipe;
527 	struct qemu_pipe_dev *dev = pipe_dev;
528 	int32_t status;
529 	int ret;
530 
531 	/* Allocate new pipe kernel object */
532 	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
533 	if (pipe == NULL) {
534 		PIPE_E("Not enough kernel memory to allocate new pipe\n");
535 		return -ENOMEM;
536 	}
537 
538 	PIPE_D("Opening pipe %p\n", pipe);
539 
540 	pipe->dev = dev;
541 	mutex_init(&pipe->lock);
542 	init_waitqueue_head(&pipe->wake_queue);
543 
544 	/* Now, tell the emulator we're opening a new pipe. We use the
545 	* pipe object's address as the channel identifier for simplicity.
546 	*/
547 	spin_lock_irqsave(&dev->lock, irq_flags);
548 	if ((ret = radix_tree_insert(&dev->pipes, (unsigned long)pipe, pipe))) {
549 		spin_unlock_irqrestore(&dev->lock, irq_flags);
550 		PIPE_E("opening pipe failed due to radix tree insertion failure\n");
551 		kfree(pipe);
552 		return ret;
553 	}
554 	writel((unsigned long)pipe, dev->base + PIPE_REG_CHANNEL);
555 	writel(CMD_OPEN, dev->base + PIPE_REG_COMMAND);
556 	status = readl(dev->base + PIPE_REG_STATUS);
557 	spin_unlock_irqrestore(&dev->lock, irq_flags);
558 
559 	if (status < 0) {
560 		PIPE_E("Could not open pipe channel, error=%d\n", status);
561 		kfree(pipe);
562 		return status;
563 	}
564 
565 	/* All is done, save the pipe into the file's private data field */
566 	file->private_data = pipe;
567 	return 0;
568 }
569 
qemu_pipe_release(struct inode * inode,struct file * filp)570 static int qemu_pipe_release(struct inode *inode, struct file *filp)
571 {
572 	unsigned long irq_flags;
573 	struct qemu_pipe *pipe = filp->private_data;
574 	struct qemu_pipe_dev *dev = pipe->dev;
575 
576 	PIPE_D("Closing pipe %p\n", dev);
577 
578 	/* The guest is closing the channel, so tell the emulator right now */
579 	spin_lock_irqsave(&dev->lock, irq_flags);
580 	writel((unsigned long)pipe, dev->base + PIPE_REG_CHANNEL);
581 	writel(CMD_CLOSE, dev->base + PIPE_REG_COMMAND);
582 	filp->private_data = NULL;
583 	radix_tree_delete(&pipe_dev->pipes, (unsigned long)pipe);
584 	kfree(pipe);
585 	spin_unlock_irqrestore(&dev->lock, irq_flags);
586 
587 	return 0;
588 }
589 
/* File operations for /dev/qemu_pipe: pure read/write/poll, no ioctl or
 * mmap support.
 */
static const struct file_operations qemu_pipe_fops = {
	.owner = THIS_MODULE,
	.read = qemu_pipe_read,
	.write = qemu_pipe_write,
	.poll = qemu_pipe_poll,
	.open = qemu_pipe_open,
	.release = qemu_pipe_release,
};

/* Registered in probe(); creates the /dev/qemu_pipe character device
 * with a dynamically assigned misc minor number.
 */
static struct miscdevice qemu_pipe_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "qemu_pipe",
	.fops = &qemu_pipe_fops,
};
604 
qemu_pipe_probe(struct platform_device * pdev)605 static int qemu_pipe_probe(struct platform_device *pdev)
606 {
607 	int err;
608 	struct resource *r;
609 	struct qemu_pipe_dev *dev = pipe_dev;
610 
611 	PIPE_D("Creating device\n");
612 
613 	INIT_RADIX_TREE(&dev->pipes, GFP_ATOMIC);
614 	/* not thread safe, but this should not happen */
615 	if (dev->base != NULL) {
616 		printk(KERN_ERR "QEMU PIPE Device: already mapped at %p\n",
617 			dev->base);
618 		return -ENODEV;
619 	}
620 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
621 	if (r == NULL || r->end - r->start < PAGE_SIZE - 1) {
622 		printk(KERN_ERR "QEMU PIPE Device: can't allocate i/o page\n");
623 		return -EINVAL;
624 	}
625 	dev->base = ioremap(r->start, PAGE_SIZE);
626 	PIPE_D("The mapped IO base is %p\n", dev->base);
627 
628 	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
629 	if (r == NULL) {
630 		printk(KERN_ERR "QEMU PIPE Device: failure to allocate IRQ\n");
631 		err = -EINVAL;
632 		goto err_alloc_irq;
633 	}
634 	dev->irq = r->start;
635 	PIPE_D("The IRQ is %d\n", dev->irq);
636 	err = request_irq(dev->irq, qemu_pipe_interrupt, IRQF_SHARED,
637 				"goldfish_pipe", dev);
638 	if (err)
639 		goto err_alloc_irq;
640 
641 	spin_lock_init(&dev->lock);
642 
643 	err = misc_register(&qemu_pipe_device);
644 	if (err)
645 		goto err_misc_register;
646 
647 	setup_access_params_addr(dev);
648 
649 	/* Acquire PipeDevice version information */
650 	dev->version = readl(dev->base + PIPE_REG_VERSION);
651 	return 0;
652 
653 err_misc_register:
654 	free_irq(dev->irq, pdev);
655 err_alloc_irq:
656 	iounmap(dev->base);
657 	dev->base = NULL;
658 	return err;
659 }
660 
qemu_pipe_remove(struct platform_device * pdev)661 static int qemu_pipe_remove(struct platform_device *pdev)
662 {
663 	struct qemu_pipe_dev *dev = pipe_dev;
664 
665 	PIPE_D("Removing device\n");
666 	misc_deregister(&qemu_pipe_device);
667 
668 	free_irq(dev->irq, pdev);
669 
670 	iounmap(dev->base);
671 	if (dev->aps)
672 		kfree(dev->aps);
673 	dev->base = NULL;
674 
675 	return 0;
676 }
677 
/* Binds to the "qemu_pipe" platform device registered by the board code. */
static struct platform_driver qemu_pipe = {
	.probe = qemu_pipe_probe,
	.remove = qemu_pipe_remove,
	.driver = {
		.name = "qemu_pipe"
	}
};
685 
qemu_pipe_dev_init(void)686 static int __init qemu_pipe_dev_init(void)
687 {
688 	return platform_driver_register(&qemu_pipe);
689 }
690 
qemu_pipe_dev_exit(void)691 static void qemu_pipe_dev_exit(void)
692 {
693 	platform_driver_unregister(&qemu_pipe);
694 }
695 
696 
697 module_init(qemu_pipe_dev_init);
698 module_exit(qemu_pipe_dev_exit);
699 
700 MODULE_AUTHOR("David Turner <digit@google.com>");
701 MODULE_LICENSE("GPL");
702