/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of the legacy version of
 * a goldfish pipe device driver. See goldfish_pipe.c for the current version.
 */
#include "goldfish_pipe.h"

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* pipe device registers */
#define PIPE_REG_COMMAND		0x00	/* write: value = command */
#define PIPE_REG_STATUS			0x04	/* read */
#define PIPE_REG_CHANNEL		0x08	/* read/write: channel id */
#ifdef CONFIG_64BIT
#define PIPE_REG_CHANNEL_HIGH		0x30	/* read/write: channel id */
#endif
#define PIPE_REG_SIZE			0x0c	/* read/write: buffer size */
#define PIPE_REG_ADDRESS		0x10	/* write: physical address */
#ifdef CONFIG_64BIT
#define PIPE_REG_ADDRESS_HIGH		0x34	/* write: physical address */
#endif
#define PIPE_REG_WAKES			0x14	/* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW	0x18	/* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH	0x1c	/* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS		0x20	/* write: batch access */
#define PIPE_REG_VERSION		0x24	/* read: device version */

/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN		1	/* open new channel */
#define CMD_CLOSE		2	/* close channel (from guest) */
#define CMD_POLL		3	/* poll read/write status */

/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN		(1 << 0)
#define PIPE_POLL_OUT		(1 << 1)
#define PIPE_POLL_HUP		(1 << 2)

/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER	4	/* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE	5	/* tell the emulator to wake us when
					 * writing is possible */
#define CMD_READ_BUFFER		6	/* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ	7	/* tell the emulator to wake us when
					 * reading is possible */

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL	-1
#define PIPE_ERROR_AGAIN	-2
#define PIPE_ERROR_NOMEM	-3
#define PIPE_ERROR_IO		-4

/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED	(1 << 0)	/* emulator closed pipe */
#define PIPE_WAKE_READ		(1 << 1)	/* pipe can now be read from */
#define PIPE_WAKE_WRITE		(1 << 2)	/* pipe can now be written to */

/* Maximum number of user pages pinned per transfer chunk */
#define MAX_PAGES_TO_GRAB	32

#define DEBUG 0

#if DEBUG
#define DPRINT(...) do { printk(KERN_ERR __VA_ARGS__); } while (0)
#else
#define DPRINT(...)
#endif

/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;	/* parent device */
	struct mutex lock;		/* serializes access to this pipe */
	unsigned long flags;		/* bitflags, see the BIT_* enum below */
	wait_queue_head_t wake_queue;	/* waiters blocked on this pipe */
};

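/*
 * Parameter block for batched pipe accesses. The device reads the
 * command parameters from this block in guest memory instead of taking
 * one register write per field; its layout must match the emulator's
 * definition of the same structure.
 */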
struct access_params {
	unsigned long channel;
	u32 size;
	unsigned long address;
	u32 cmd;
	u32 result;
	/* reserved for future extension */
	u32 flags;
};

/* Bit flags for the 'flags' field */
enum {
	BIT_CLOSED_ON_HOST = 0,	/* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,	/* want to be woken on writes */
	BIT_WAKE_ON_READ   = 2,	/* want to be woken on reads */
};

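/*
 * Send a command for the given pipe and return the resulting value of
 * the status register. The pipe's kernel address serves as the channel
 * identifier; the device executes commands synchronously, so the status
 * register is valid as soon as the command write completes.
 */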
static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	u32 status;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
	writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	status = readl(dev->base + PIPE_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, flags);
	return status;
}

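/* Same as goldfish_cmd_status(), but without reading back the status. */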
static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
	writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/*
 * Note: QEMU returns 0 for accesses to unknown registers, so reading
 * back the batch parameter address tells us whether the access_params
 * interface is supported.
 */
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
				  struct access_params *aps)
{
	u32 aph, apl;
	u64 paddr;

	aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	paddr = ((u64)aph << 32) | apl;
	return paddr == __pa(aps);
}

/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
				    struct goldfish_pipe_dev *dev)
{
	u64 paddr;
	struct access_params *aps;

	aps = devm_kzalloc(&pdev->dev, sizeof(*aps), GFP_KERNEL);
	if (!aps)
		return -1;

	/* FIXME */
	paddr = __pa(aps);
	writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	if (valid_batchbuffer_addr(dev, aps)) {
		dev->aps = aps;
		return 0;
	}

	devm_kfree(&pdev->dev, aps);
	return -1;
}

/* A value that will not be set by the emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)
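/*
 * Attempt a transfer through the batch interface: fill the shared
 * access_params block, then kick the device with a single write to
 * PIPE_REG_ACCESS_PARAMS. Returns 0 with the transfer status stored in
 * *status, or -1 if the batch interface is unavailable (the device left
 * the sentinel result untouched).
 */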
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
			     unsigned long address, unsigned long avail,
			     struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (!aps)
		return -1;

	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If aps->result has not changed, the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}

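/*
 * Common implementation for read() and write(). The user buffer is
 * pinned with get_user_pages_fast(), physically contiguous pages are
 * coalesced into a single transfer, and each chunk is handed to the
 * device either through the batch interface or via individual register
 * writes. If the device reports PIPE_ERROR_AGAIN and the file is in
 * blocking mode, we request a wake event and sleep until the interrupt
 * handler clears our wake bit.
 */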
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
					size_t bufflen, int is_write)
{
	unsigned long irq_flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	unsigned long address, address_end;
	struct page *pages[MAX_PAGES_TO_GRAB] = {};
	int count = 0, ret = -EINVAL;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
		       buffer, bufflen))
		return -EFAULT;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	address = (unsigned long)(void *)buffer;
	address_end = address + bufflen;

	while (address < address_end) {
		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long next, avail;
		int status, wakeBit, page_i, num_contiguous_pages;
		long first_page, last_page, requested_pages;
		unsigned long xaddr, xaddr_prev, xaddr_i;

		/*
		 * Attempt to grab multiple physically contiguous pages.
		 */
		first_page = address & PAGE_MASK;
		last_page = (address_end - 1) & PAGE_MASK;
		requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
		if (requested_pages > MAX_PAGES_TO_GRAB)
			requested_pages = MAX_PAGES_TO_GRAB;
		ret = get_user_pages_fast(first_page,
					  requested_pages, !is_write, pages);

		DPRINT("%s: requested pages: %d %ld 0x%lx\n",
		       __func__, ret, requested_pages, first_page);
		if (ret == 0) {
			DPRINT("%s: error: (requested pages == 0) (wanted %ld)\n",
			       __func__, requested_pages);
			mutex_unlock(&pipe->lock);
			return ret;
		}
		if (ret < 0) {
			DPRINT("%s: (requested pages < 0) %ld\n",
			       __func__, requested_pages);
			mutex_unlock(&pipe->lock);
			return ret;
		}

		xaddr = page_to_phys(pages[0]) | (address & ~PAGE_MASK);
		xaddr_prev = xaddr;
		num_contiguous_pages = ret == 0 ? 0 : 1;
		for (page_i = 1; page_i < ret; page_i++) {
			xaddr_i = page_to_phys(pages[page_i]) | (address & ~PAGE_MASK);
			if (xaddr_i == xaddr_prev + PAGE_SIZE) {
				page_end += PAGE_SIZE;
				xaddr_prev = xaddr_i;
				num_contiguous_pages++;
			} else {
				DPRINT("%s: discontinuous page boundary: %d pages instead\n",
				       __func__, page_i);
				break;
			}
		}
		next = page_end < address_end ? page_end : address_end;
		avail = next - address;

		/* Now, try to transfer the bytes in the current page */
		spin_lock_irqsave(&dev->lock, irq_flags);
		if (access_with_param(dev,
				      is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
				      xaddr, avail, pipe, &status)) {
			writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
			writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
			writel(avail, dev->base + PIPE_REG_SIZE);
			writel(xaddr, dev->base + PIPE_REG_ADDRESS);
#ifdef CONFIG_64BIT
			writel((u32)((u64)xaddr >> 32), dev->base + PIPE_REG_ADDRESS_HIGH);
#endif
			writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
			       dev->base + PIPE_REG_COMMAND);
			status = readl(dev->base + PIPE_REG_STATUS);
		}
		spin_unlock_irqrestore(&dev->lock, irq_flags);

		for (page_i = 0; page_i < ret; page_i++) {
			if (status > 0 && !is_write &&
			    page_i < num_contiguous_pages)
				set_page_dirty(pages[page_i]);
			put_page(pages[page_i]);
		}

		if (status > 0) { /* Correct transfer */
			count += status;
			address += status;
			continue;
		} else if (status == 0) { /* EOF */
			ret = 0;
			break;
		} else if (status < 0 && count > 0) {
			/*
			 * An error occurred and we already transferred
			 * something on one of the previous pages.
			 * Just return what we already copied and log the
			 * error.
			 *
			 * Note: This seems like an incorrect approach but
			 * cannot change it until we check if any user space
			 * ABI relies on this behavior.
			 */
			if (status != PIPE_ERROR_AGAIN)
				pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
						    status, is_write ? "write" : "read");
			ret = 0;
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
		    (filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/*
		 * The backend blocked the read/write, wait until the backend
		 * tells us it's ready to process more data.
		 */
		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
		set_bit(wakeBit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe,
			     is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		while (test_bit(wakeBit, &pipe->flags)) {
			if (wait_event_interruptible(pipe->wake_queue,
						     !test_bit(wakeBit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;
	}
	mutex_unlock(&pipe->lock);

	if (ret < 0)
		return ret;
	else
		return count;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
					bufflen, 1);
}

static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	unsigned int mask = 0;
	int status;

	mutex_lock(&pipe->lock);

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd_status(pipe, CMD_POLL);

	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;

	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;

	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;

	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/*
	 * We're going to read from the emulator a list of (channel,flags)
	 * pairs corresponding to the wake events that occurred on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

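		/*
		 * On 64-bit kernels the pipe's kernel address has nonzero
		 * upper bits, so a zero high word is already enough to
		 * detect the end-of-list marker.
		 */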
#ifdef CONFIG_64BIT
		channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;

		if (channel == 0)
			break;
#endif
		channel |= readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just close a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe object's kernel address as the channel
 * identifier. All goldfish emulations are 32bit right now so this is
 * fine; a move to 64bit will need this addressing scheme revisited.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe *pipe;
	struct goldfish_pipe_dev *dev = goldfish_pipe_dev;
	int32_t status;

	/* Allocate new pipe kernel object */
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	DPRINT("%s: dev=%p new_pipe=%p file=%p\n", __func__, dev, pipe, file);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Now, tell the emulator we're opening a new pipe. We use the
	 * pipe object's address as the channel identifier for simplicity.
	 */

	status = goldfish_cmd_status(pipe, CMD_OPEN);
	if (status < 0) {
		kfree(pipe);
		return status;
	}

	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	struct goldfish_pipe *pipe = filp->private_data;

	DPRINT("%s: pipe=%p file=%p\n", __func__, pipe, filp);
	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_cmd(pipe, CMD_CLOSE);
	kfree(pipe);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

int goldfish_pipe_device_init_v1(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = goldfish_pipe_dev;
	int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
				   IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
		return err;
	}

	err = misc_register(&goldfish_pipe_miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v1 device\n");
		return err;
	}

	setup_access_params_addr(pdev, dev);
	return 0;
}

void goldfish_pipe_device_deinit_v1(struct platform_device *pdev)
{
	misc_deregister(&goldfish_pipe_miscdev);
}