// SPDX-License-Identifier: GPL-2.0+

/* The Goldfish sync driver is designed to provide an interface
 * between the underlying host's sync device and the kernel's
 * fence sync framework.
 *
 * The purpose of the device/driver is to enable lightweight creation and
 * signaling of timelines and fences in order to synchronize the guest with
 * host-side graphics events.
 *
 * Each time the interrupt trips, the driver may perform a sync operation.
 */

#include <linux/acpi.h>
#include <linux/dma-fence.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sync_file.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <uapi/linux/goldfish/goldfish_sync.h>

struct sync_pt {
	struct dma_fence base;		/* must be the first field in this struct */
	struct list_head active_list;	/* see active_list_head below */
};

struct goldfish_sync_state;

struct goldfish_sync_timeline {
	struct goldfish_sync_state *sync_state;

	/* This object is owned by userspace from open() calls and also each
	 * sync_pt refers to it.
	 */
	struct kref kref;
	char name[32];			/* for debugging */

	u64 context;
	unsigned int seqno;
	/* list of active (unsignaled/errored) sync_pts */
	struct list_head active_list_head;
	spinlock_t lock;		/* protects the fields above */
};

/* The definitions below (command codes, register layout) and the ioctl
 * definitions in the uapi header need to be kept in sync with the
 * following files:
 *
 * Host-side (emulator):
 * external/qemu/android/emulation/goldfish_sync.h
 * external/qemu-android/hw/misc/goldfish_sync.c
 *
 * Guest-side (system image):
 * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
 * device/generic/goldfish/ueventd.ranchu.rc
 * platform/build/target/board/generic/sepolicy/file_contexts
 */
struct goldfish_sync_hostcmd {
	/* sorted for alignment */
	u64 handle;
	u64 hostcmd_handle;
	u32 cmd;
	u32 time_arg;
};

struct goldfish_sync_guestcmd {
	u64 host_command;	/* u64 for alignment */
	u64 glsync_handle;
	u64 thread_handle;
	u64 guest_timeline_handle;
};

/* The host operations are: */
enum cmd_id {
	/* Ready signal - used to mark when the irq should lower */
	CMD_SYNC_READY = 0,

	/* Creates a new timeline; writes back the timeline handle */
	CMD_CREATE_SYNC_TIMELINE = 1,

	/* Creates a fence object; reads the timeline handle and time
	 * argument, writes the fence fd back via the host command reply.
	 */
	CMD_CREATE_SYNC_FENCE = 2,

	/* Increments the timeline; reads the timeline handle and time
	 * argument
	 */
	CMD_SYNC_TIMELINE_INC = 3,

	/* Destroys a timeline; reads the timeline handle */
	CMD_DESTROY_SYNC_TIMELINE = 4,

	/* Starts a wait on the host with the given glsync object and
	 * sync thread handle
	 */
	CMD_TRIGGER_HOST_WAIT = 5,
};

/* The host register layout is: */
enum sync_reg_id {
	/* host->guest batch commands */
	SYNC_REG_BATCH_COMMAND = 0x00,

	/* guest->host batch commands */
	SYNC_REG_BATCH_GUESTCOMMAND = 0x04,

	/* communicate physical address of host->guest batch commands */
	SYNC_REG_BATCH_COMMAND_ADDR = 0x08,
	SYNC_REG_BATCH_COMMAND_ADDR_HIGH = 0x0C,	/* 64-bit part */

	/* communicate physical address of guest->host commands */
	SYNC_REG_BATCH_GUESTCOMMAND_ADDR = 0x10,
	SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH = 0x14,	/* 64-bit part */

	/* signals that the device has been probed */
	SYNC_REG_INIT = 0x18,
};
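
/* Note: the batch command buffer addresses are programmed into the ADDR /
 * ADDR_HIGH register pairs (and read back for verification) at probe time
 * by setup_verify_batch_cmd_addr() below; SYNC_REG_INIT is written last to
 * tell the host that the driver has finished probing.
 */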

#define GOLDFISH_SYNC_MAX_CMDS 256
#define GOLDFISH_SYNC_MAX_CMDS_STACK 32

/* The driver state: */
struct goldfish_sync_state {
	struct miscdevice miscdev;

	char __iomem *reg_base;
	int irq;

	/* Used to generate unique names, see goldfish_sync_timeline::name. */
	u64 id_counter;

	/* |mutex_lock| protects all concurrent access
	 * to timelines for both kernel and user space.
	 */
	struct mutex mutex_lock;

	/* Buffer holding commands issued from host. */
	struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
	u16 to_do_begin;
	u16 to_do_end;
	/* Protects the to_do fields */
	spinlock_t to_do_lock;

	/* Buffers for the reading or writing
	 * of individual commands. The host can directly write
	 * to |batch_hostcmd| (and then this driver immediately
	 * copies contents to |to_do|). This driver either replies
	 * through |batch_hostcmd| or simply issues a
	 * guest->host command through |batch_guestcmd|.
	 */
	struct goldfish_sync_hostcmd batch_hostcmd;
	struct goldfish_sync_guestcmd batch_guestcmd;

	/* Used to give this struct itself to a work queue
	 * function for executing actual sync commands.
	 */
	struct work_struct work_item;
};
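
/* Command flow, as implemented below: the host writes a command into
 * |batch_hostcmd| and raises the IRQ; the interrupt handler copies it into
 * the |to_do| ring under |to_do_lock| and schedules |work_item|, which then
 * drains |to_do| under |mutex_lock| and runs each command through
 * goldfish_sync_run_hostcmd().
 */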

static struct goldfish_sync_timeline
*goldfish_dma_fence_parent(struct dma_fence *fence)
{
	return container_of(fence->lock, struct goldfish_sync_timeline, lock);
}

static struct sync_pt *goldfish_sync_fence_to_sync_pt(struct dma_fence *fence)
{
	return container_of(fence, struct sync_pt, base);
}

/* sync_state->mutex_lock must be locked. */
struct goldfish_sync_timeline __must_check
*goldfish_sync_timeline_create(struct goldfish_sync_state *sync_state)
{
	struct goldfish_sync_timeline *tl;

	tl = kzalloc(sizeof(*tl), GFP_KERNEL);
	if (!tl)
		return NULL;

	tl->sync_state = sync_state;
	kref_init(&tl->kref);
	snprintf(tl->name, sizeof(tl->name),
		 "%s:%llu", GOLDFISH_SYNC_DEVICE_NAME,
		 ++sync_state->id_counter);
	tl->context = dma_fence_context_alloc(1);
	tl->seqno = 0;
	INIT_LIST_HEAD(&tl->active_list_head);
	spin_lock_init(&tl->lock);

	return tl;
}

static void goldfish_sync_timeline_free(struct kref *kref)
{
	struct goldfish_sync_timeline *tl =
		container_of(kref, struct goldfish_sync_timeline, kref);

	kfree(tl);
}

static void goldfish_sync_timeline_get(struct goldfish_sync_timeline *tl)
{
	kref_get(&tl->kref);
}

void goldfish_sync_timeline_put(struct goldfish_sync_timeline *tl)
{
	kref_put(&tl->kref, goldfish_sync_timeline_free);
}

void goldfish_sync_timeline_signal(struct goldfish_sync_timeline *tl,
				   unsigned int inc)
{
	unsigned long flags;
	struct sync_pt *pt, *next;

	spin_lock_irqsave(&tl->lock, flags);
	tl->seqno += inc;

	list_for_each_entry_safe(pt, next, &tl->active_list_head, active_list) {
		/* dma_fence_is_signaled_locked() has side effects */
		if (dma_fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->active_list);
	}
	spin_unlock_irqrestore(&tl->lock, flags);
}

static const struct dma_fence_ops goldfish_sync_timeline_fence_ops;

static struct sync_pt __must_check
*goldfish_sync_pt_create(struct goldfish_sync_timeline *tl,
			 unsigned int value)
{
	struct sync_pt *pt = kzalloc(sizeof(*pt), GFP_KERNEL);

	if (!pt)
		return NULL;

	dma_fence_init(&pt->base,
		       &goldfish_sync_timeline_fence_ops,
		       &tl->lock,
		       tl->context,
		       value);
	INIT_LIST_HEAD(&pt->active_list);
	goldfish_sync_timeline_get(tl);	/* pt refers to tl */

	return pt;
}

static void goldfish_sync_pt_destroy(struct sync_pt *pt)
{
	struct goldfish_sync_timeline *tl =
		goldfish_dma_fence_parent(&pt->base);
	unsigned long flags;

	spin_lock_irqsave(&tl->lock, flags);
	if (!list_empty(&pt->active_list))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(&tl->lock, flags);

	goldfish_sync_timeline_put(tl);	/* unref pt from tl */
	dma_fence_free(&pt->base);
}

static const char
*goldfish_sync_timeline_fence_get_driver_name(struct dma_fence *fence)
{
	return "sw_sync";
}

static const char
*goldfish_sync_timeline_fence_get_timeline_name(struct dma_fence *fence)
{
	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);

	return tl->name;
}

static void goldfish_sync_timeline_fence_release(struct dma_fence *fence)
{
	goldfish_sync_pt_destroy(goldfish_sync_fence_to_sync_pt(fence));
}

static bool goldfish_sync_timeline_fence_signaled(struct dma_fence *fence)
{
	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);

	return tl->seqno >= fence->seqno;
}

static bool
goldfish_sync_timeline_fence_enable_signaling(struct dma_fence *fence)
{
	struct sync_pt *pt;
	struct goldfish_sync_timeline *tl;

	if (goldfish_sync_timeline_fence_signaled(fence))
		return false;

	pt = goldfish_sync_fence_to_sync_pt(fence);
	tl = goldfish_dma_fence_parent(fence);
	list_add_tail(&pt->active_list, &tl->active_list_head);
	return true;
}

static void goldfish_sync_timeline_fence_value_str(struct dma_fence *fence,
						   char *str, int size)
{
	snprintf(str, size, "%d", fence->seqno);
}

static void goldfish_sync_timeline_fence_timeline_value_str(
				struct dma_fence *fence,
				char *str, int size)
{
	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);

	snprintf(str, size, "%d", tl->seqno);
}

static const struct dma_fence_ops goldfish_sync_timeline_fence_ops = {
	.get_driver_name = goldfish_sync_timeline_fence_get_driver_name,
	.get_timeline_name = goldfish_sync_timeline_fence_get_timeline_name,
	.enable_signaling = goldfish_sync_timeline_fence_enable_signaling,
	.signaled = goldfish_sync_timeline_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = goldfish_sync_timeline_fence_release,
	.fence_value_str = goldfish_sync_timeline_fence_value_str,
	.timeline_value_str = goldfish_sync_timeline_fence_timeline_value_str,
};

struct fence_data {
	struct sync_pt *pt;
	struct sync_file *sync_file_obj;
	int fd;
};

static int __must_check
goldfish_sync_fence_create(struct goldfish_sync_timeline *tl, u32 val,
			   struct fence_data *fence)
{
	struct sync_pt *pt;
	struct sync_file *sync_file_obj = NULL;
	int fd;

	pt = goldfish_sync_pt_create(tl, val);
	if (!pt)
		return -1;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto err_cleanup_pt;

	sync_file_obj = sync_file_create(&pt->base);
	if (!sync_file_obj)
		goto err_cleanup_fd_pt;

	fd_install(fd, sync_file_obj->file);

	dma_fence_put(&pt->base);	/* sync_file_obj now owns the fence */

	fence->pt = pt;
	fence->sync_file_obj = sync_file_obj;
	fence->fd = fd;

	return 0;

err_cleanup_fd_pt:
	put_unused_fd(fd);
err_cleanup_pt:
	goldfish_sync_pt_destroy(pt);

	return -1;
}

static void goldfish_sync_fence_destroy(const struct fence_data *fence)
{
	fput(fence->sync_file_obj->file);
	goldfish_sync_pt_destroy(fence->pt);
}

static inline bool
goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
			u32 cmd,
			u64 handle,
			u32 time_arg,
			u64 hostcmd_handle)
{
	unsigned int to_do_end = sync_state->to_do_end;
	struct goldfish_sync_hostcmd *to_do = sync_state->to_do;
	struct goldfish_sync_hostcmd *to_add;

	if (to_do_end >= GOLDFISH_SYNC_MAX_CMDS) {
		const unsigned int to_do_begin = sync_state->to_do_begin;
		const unsigned int to_do_size = to_do_end - to_do_begin;

		/*
		 * This memmove should not run often, as long as
		 * goldfish_sync_work_item_fn grabs commands faster than
		 * they arrive.
		 */
		memmove(&to_do[0], &to_do[to_do_begin],
			sizeof(*to_do) * to_do_size);
		to_do_end = to_do_size;
		sync_state->to_do_begin = 0;

		if (to_do_end >= GOLDFISH_SYNC_MAX_CMDS)
			return false;
	}

	to_add = &to_do[to_do_end];

	to_add->cmd = cmd;
	to_add->handle = handle;
	to_add->time_arg = time_arg;
	to_add->hostcmd_handle = hostcmd_handle;

	sync_state->to_do_end = to_do_end + 1;
	return true;
}

static inline void
goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
			    u32 cmd,
			    u64 handle,
			    u32 time_arg,
			    u64 hostcmd_handle)
{
	unsigned long irq_flags;
	struct goldfish_sync_hostcmd *batch_hostcmd =
		&sync_state->batch_hostcmd;

	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);

	batch_hostcmd->cmd = cmd;
	batch_hostcmd->handle = handle;
	batch_hostcmd->time_arg = time_arg;
	batch_hostcmd->hostcmd_handle = hostcmd_handle;
	writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);

	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
}

static inline void
goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
			    u32 cmd,
			    u64 glsync_handle,
			    u64 thread_handle,
			    u64 timeline_handle)
{
	unsigned long irq_flags;
	struct goldfish_sync_guestcmd *batch_guestcmd =
		&sync_state->batch_guestcmd;

	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);

	batch_guestcmd->host_command = cmd;
	batch_guestcmd->glsync_handle = glsync_handle;
	batch_guestcmd->thread_handle = thread_handle;
	batch_guestcmd->guest_timeline_handle = timeline_handle;
	writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);

	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
}

/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
 * In the context of OpenGL, this interrupt will fire whenever we need
 * to signal a fence fd in the guest, with the command
 * |CMD_SYNC_TIMELINE_INC|.
 * However, because this function is called in interrupt context, the
 * actual work of signaling must be done outside of it. The shared work
 * queue is used for this purpose. Once all pending commands have been
 * collected by the interrupt handler, we call |schedule_work|, which will
 * later run the actual desired sync command in
 * |goldfish_sync_work_item_fn|.
 */
static irqreturn_t
goldfish_sync_interrupt_impl(struct goldfish_sync_state *sync_state)
{
	struct goldfish_sync_hostcmd *batch_hostcmd =
		&sync_state->batch_hostcmd;

	spin_lock(&sync_state->to_do_lock);
	for (;;) {
		u32 nextcmd;
		u32 command_r;
		u64 handle_rw;
		u32 time_r;
		u64 hostcmd_handle_rw;

		readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
		nextcmd = batch_hostcmd->cmd;

		if (nextcmd == 0)
			break;

		command_r = nextcmd;
		handle_rw = batch_hostcmd->handle;
		time_r = batch_hostcmd->time_arg;
		hostcmd_handle_rw = batch_hostcmd->hostcmd_handle;

		BUG_ON(!goldfish_sync_cmd_queue(sync_state,
						command_r,
						handle_rw,
						time_r,
						hostcmd_handle_rw));
	}
	spin_unlock(&sync_state->to_do_lock);

	schedule_work(&sync_state->work_item);
	return IRQ_HANDLED;
}

static const struct file_operations goldfish_sync_fops;

static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
{
	struct goldfish_sync_state *sync_state = dev_id;

	return (sync_state->miscdev.fops == &goldfish_sync_fops) ?
		goldfish_sync_interrupt_impl(sync_state) : IRQ_NONE;
}

/* We expect that commands will come in at a slow enough rate
 * that the number of incoming items will not exceed
 * GOLDFISH_SYNC_MAX_CMDS.
 *
 * This is because of the way the sync device is used:
 * it only manages buffer data transfers per frame,
 * with a sequential dependency between putting things in
 * to_do and taking them out. Once a set of commands is
 * queued up in to_do, the user of the device waits for
 * them to be processed before queuing additional commands,
 * which limits the rate at which commands come in
 * to the rate at which we take them out here.
 *
 * We also don't expect more than MAX_CMDS to be issued
 * at once; there is a correspondence between the buffers
 * that need swapping to the display / buffer queue
 * and particular commands, and we don't expect there to be
 * enough display or buffer queues in operation at once
 * to overrun GOLDFISH_SYNC_MAX_CMDS.
 */
static u32 __must_check
goldfish_sync_grab_commands(struct goldfish_sync_state *sync_state,
			    struct goldfish_sync_hostcmd *dst,
			    const u32 dst_size)
{
	u32 result;
	unsigned int to_do_begin;
	unsigned int to_do_end;
	unsigned int to_do_size;
	struct goldfish_sync_hostcmd *to_do;
	unsigned long irq_flags;

	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);

	to_do = sync_state->to_do;
	to_do_begin = sync_state->to_do_begin;
	to_do_end = sync_state->to_do_end;
	to_do_size = to_do_end - to_do_begin;

	if (to_do_size > dst_size) {
		memcpy(dst, &to_do[to_do_begin], sizeof(*to_do) * dst_size);
		sync_state->to_do_begin = to_do_begin + dst_size;
		result = dst_size;
	} else {
		memcpy(dst, &to_do[to_do_begin], sizeof(*to_do) * to_do_size);
		sync_state->to_do_begin = 0;
		sync_state->to_do_end = 0;
		result = to_do_size;
	}

	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);

	return result;
}

void goldfish_sync_run_hostcmd(struct goldfish_sync_state *sync_state,
			       struct goldfish_sync_hostcmd *todo)
{
	struct goldfish_sync_timeline *tl =
		(struct goldfish_sync_timeline *)(uintptr_t)todo->handle;
	struct fence_data fence;

	switch (todo->cmd) {
	case CMD_SYNC_READY:
		break;

	case CMD_CREATE_SYNC_TIMELINE:
		tl = goldfish_sync_timeline_create(sync_state);
		WARN_ON(!tl);
		goldfish_sync_hostcmd_reply(sync_state,
					    CMD_CREATE_SYNC_TIMELINE,
					    (uintptr_t)tl,
					    0,
					    todo->hostcmd_handle);
		break;

	case CMD_CREATE_SYNC_FENCE:
		WARN_ON(!tl);
		if (goldfish_sync_fence_create(tl, todo->time_arg, &fence))
			fence.fd = -1;
		goldfish_sync_hostcmd_reply(sync_state,
					    CMD_CREATE_SYNC_FENCE,
					    fence.fd,
					    0,
					    todo->hostcmd_handle);
		break;

	case CMD_SYNC_TIMELINE_INC:
		WARN_ON(!tl);
		goldfish_sync_timeline_signal(tl, todo->time_arg);
		break;

	case CMD_DESTROY_SYNC_TIMELINE:
		WARN_ON(!tl);
		goldfish_sync_timeline_put(tl);
		break;
	}
}

/* |goldfish_sync_work_item_fn| does the actual work of servicing
 * host->guest sync commands. This function is triggered whenever
 * the IRQ for the goldfish sync device is raised. Once it starts
 * running, it grabs the contents of the buffer containing the
 * commands it needs to execute (there may be multiple, because
 * our IRQ is active high and not edge triggered), and then
 * runs all of them one after the other.
 */
static void goldfish_sync_work_item_fn(struct work_struct *input)
{
	struct goldfish_sync_state *sync_state =
		container_of(input, struct goldfish_sync_state, work_item);

	struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS_STACK];

	mutex_lock(&sync_state->mutex_lock);

	while (true) {
		u32 i;
		u32 to_do_end =
			goldfish_sync_grab_commands(sync_state, to_run,
						    ARRAY_SIZE(to_run));

		for (i = 0; i < to_do_end; i++)
			goldfish_sync_run_hostcmd(sync_state, &to_run[i]);

		if (to_do_end < ARRAY_SIZE(to_run))
			break;
	}

	mutex_unlock(&sync_state->mutex_lock);
}

static int goldfish_sync_open(struct inode *inode, struct file *filp)
{
	struct goldfish_sync_state *sync_state =
		container_of(filp->private_data,
			     struct goldfish_sync_state,
			     miscdev);

	if (mutex_lock_interruptible(&sync_state->mutex_lock))
		return -ERESTARTSYS;

	filp->private_data = goldfish_sync_timeline_create(sync_state);
	mutex_unlock(&sync_state->mutex_lock);

	return filp->private_data ? 0 : -ENOMEM;
}

static int goldfish_sync_release(struct inode *inode, struct file *filp)
{
	struct goldfish_sync_timeline *tl = filp->private_data;

	goldfish_sync_timeline_put(tl);
	return 0;
}

/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
 * and is used in conjunction with eglCreateSyncKHR to queue up the
 * actual work of waiting for the EGL sync command to complete,
 * possibly returning a fence fd to the guest.
 */
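/* For reference, a minimal sketch of the guest userspace side (illustrative
 * only; it assumes the GOLDFISH_SYNC_IOC_QUEUE_WORK ioctl and struct
 * goldfish_sync_ioctl_info from the uapi header, handle values obtained from
 * the guest EGL implementation, and a device node at /dev/goldfish_sync):
 *
 *	struct goldfish_sync_ioctl_info info = {
 *		.host_glsync_handle_in = glsync_handle,
 *		.host_syncthread_handle_in = syncthread_handle,
 *	};
 *	int dev_fd = open("/dev/goldfish_sync", O_RDWR);
 *
 *	if (dev_fd >= 0 &&
 *	    ioctl(dev_fd, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) == 0) {
 *		// info.fence_fd_out can now be waited on or passed along,
 *		// e.g. with sync_wait() from libsync.
 *	}
 */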
static long
goldfish_sync_ioctl_locked(struct goldfish_sync_timeline *tl,
			   unsigned int cmd,
			   unsigned long arg)
{
	struct goldfish_sync_ioctl_info ioctl_data;
	struct fence_data fence;

	switch (cmd) {
	case GOLDFISH_SYNC_IOC_QUEUE_WORK:
		if (copy_from_user(&ioctl_data,
				   (void __user *)arg,
				   sizeof(ioctl_data)))
			return -EFAULT;

		if (!ioctl_data.host_syncthread_handle_in)
			return -EFAULT;

		if (goldfish_sync_fence_create(tl, tl->seqno + 1, &fence))
			return -EAGAIN;

		ioctl_data.fence_fd_out = fence.fd;
		if (copy_to_user((void __user *)arg,
				 &ioctl_data,
				 sizeof(ioctl_data))) {
			goldfish_sync_fence_destroy(&fence);
			return -EFAULT;
		}

		/* We are now about to trigger a host-side wait;
		 * the host signals the timeline once the wait completes.
		 */
		goldfish_sync_send_guestcmd(tl->sync_state,
					    CMD_TRIGGER_HOST_WAIT,
					    ioctl_data.host_glsync_handle_in,
					    ioctl_data.host_syncthread_handle_in,
					    (u64)(uintptr_t)tl);
		return 0;

	default:
		return -ENOTTY;
	}
}

static long goldfish_sync_ioctl(struct file *filp,
				unsigned int cmd,
				unsigned long arg)
{
	struct goldfish_sync_timeline *tl = filp->private_data;
	struct goldfish_sync_state *x = tl->sync_state;
	long res;

	if (mutex_lock_interruptible(&x->mutex_lock))
		return -ERESTARTSYS;

	res = goldfish_sync_ioctl_locked(tl, cmd, arg);
	mutex_unlock(&x->mutex_lock);

	return res;
}

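/* Tell the host the guest-physical address of one of the batch command
 * buffers by writing its low and high 32 bits to the given registers,
 * then read the address back to verify that the device accepted it.
 */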
static bool setup_verify_batch_cmd_addr(char *reg_base,
					void *batch_addr,
					u32 addr_offset,
					u32 addr_offset_high)
{
	u64 batch_addr_phys;
	u64 batch_addr_phys_test_lo;
	u64 batch_addr_phys_test_hi;

	batch_addr_phys = virt_to_phys(batch_addr);
	writel(lower_32_bits(batch_addr_phys), reg_base + addr_offset);
	writel(upper_32_bits(batch_addr_phys), reg_base + addr_offset_high);

	batch_addr_phys_test_lo = readl(reg_base + addr_offset);
	batch_addr_phys_test_hi = readl(reg_base + addr_offset_high);

	batch_addr_phys = batch_addr_phys_test_lo |
		(batch_addr_phys_test_hi << 32);

	return virt_to_phys(batch_addr) == batch_addr_phys;
}

static const struct file_operations goldfish_sync_fops = {
	.owner = THIS_MODULE,
	.open = goldfish_sync_open,
	.release = goldfish_sync_release,
	.unlocked_ioctl = goldfish_sync_ioctl,
	.compat_ioctl = goldfish_sync_ioctl,
};

static void fill_miscdevice(struct miscdevice *misc)
{
	misc->name = GOLDFISH_SYNC_DEVICE_NAME;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->fops = &goldfish_sync_fops;
}

static int goldfish_sync_probe(struct platform_device *pdev)
{
	struct goldfish_sync_state *sync_state;
	struct resource *ioresource;
	int result;

	sync_state = devm_kzalloc(&pdev->dev, sizeof(*sync_state), GFP_KERNEL);
	if (!sync_state)
		return -ENOMEM;

	spin_lock_init(&sync_state->to_do_lock);
	mutex_init(&sync_state->mutex_lock);
	INIT_WORK(&sync_state->work_item, goldfish_sync_work_item_fn);

	ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!ioresource)
		return -ENODEV;

	sync_state->reg_base =
		devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
	if (!sync_state->reg_base)
		return -ENOMEM;

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		return -ENODEV;

	sync_state->irq = result;

	result = devm_request_irq(&pdev->dev,
				  sync_state->irq,
				  goldfish_sync_interrupt,
				  IRQF_SHARED,
				  pdev->name,
				  sync_state);
	if (result)
		return -ENODEV;

	if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
					 &sync_state->batch_hostcmd,
					 SYNC_REG_BATCH_COMMAND_ADDR,
					 SYNC_REG_BATCH_COMMAND_ADDR_HIGH))
		return -ENODEV;

	if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
					 &sync_state->batch_guestcmd,
					 SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
					 SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH))
		return -ENODEV;

	fill_miscdevice(&sync_state->miscdev);
	result = misc_register(&sync_state->miscdev);
	if (result)
		return -ENODEV;

	writel(0, sync_state->reg_base + SYNC_REG_INIT);

	platform_set_drvdata(pdev, sync_state);

	return 0;
}

static int goldfish_sync_remove(struct platform_device *pdev)
{
	struct goldfish_sync_state *sync_state = platform_get_drvdata(pdev);

	misc_deregister(&sync_state->miscdev);
	return 0;
}

static const struct of_device_id goldfish_sync_of_match[] = {
	{ .compatible = "google,goldfish-sync", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);

static const struct acpi_device_id goldfish_sync_acpi_match[] = {
	{ "GFSH0006", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);

static struct platform_driver goldfish_sync = {
	.probe = goldfish_sync_probe,
	.remove = goldfish_sync_remove,
	.driver = {
		.name = GOLDFISH_SYNC_DEVICE_NAME,
		.of_match_table = goldfish_sync_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
	}
};
module_platform_driver(goldfish_sync);

MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Android QEMU Sync Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.0");