/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;

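/*
 * sync_timeline_create() - creates a sync object
 * @ops:	sync_timeline_ops supplied by the driver
 * @size:	size to allocate; must be at least
 *		sizeof(struct sync_timeline), allowing the driver to embed
 *		the timeline at the start of a larger private structure
 * @name:	sync_timeline name
 *
 * Allocates and initializes a timeline with a single fence context and
 * empty child/active lists, registers it with debugfs, and returns it
 * with one reference held.  Returns NULL on bad size or allocation
 * failure.
 */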
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}

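/*
 * sync_timeline_destroy() - releases the creator's reference
 *
 * Marks the timeline as destroyed, signals any fences still waiting on
 * it so their waiters are not stranded, and drops the reference taken
 * in sync_timeline_create().  The object itself is freed only once the
 * last sync_pt holding a reference to it is released.
 */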
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

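/*
 * sync_timeline_signal() - called by the driver when the timeline
 * advances
 *
 * Walks the active list under child_list_lock and removes every
 * sync_pt whose fence now reports signaled; fence_is_signaled_locked()
 * also fires the fence's callbacks, which is what wakes any waiters.
 */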
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->child_list_lock, flags);

	list_for_each_entry_safe(pt, next, &obj->active_list_head,
				 active_list) {
		if (fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->active_list);
	}

	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);

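/*
 * sync_pt_create() - creates a sync point on a timeline
 * @obj:	parent sync_timeline
 * @size:	size to allocate; must be at least sizeof(struct sync_pt)
 *
 * Takes a reference on the timeline for the lifetime of the point and
 * initializes the embedded fence with the next sequence number on the
 * timeline's context.  Returns NULL on bad size or allocation failure.
 */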
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	sync_timeline_get(obj);
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);

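/*
 * sync_fence_alloc() - allocates a fence backed by an anonymous inode
 *
 * The returned fence owns a struct file using sync_fence_fops, which is
 * what lets userspace hold fences as file descriptors and poll() on
 * them.  The fence's lifetime is tied to the file: the final fput()
 * ends up in sync_fence_release().
 */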
static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
	struct sync_fence *fence;

	fence = kzalloc(size, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	init_waitqueue_head(&fence->wq);

	return fence;

err:
	kfree(fence);
	return NULL;
}

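/*
 * fence_check_cb_func() - per-sync_pt fence callback
 *
 * A sync_fence's status field counts the constituent fences that have
 * not yet signaled (or holds a negative error).  Each constituent gets
 * this callback; when the last one fires and the count reaches zero,
 * every waiter on the fence's waitqueue is woken.
 */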
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct sync_fence_cb *check;
	struct sync_fence *fence;

	check = container_of(cb, struct sync_fence_cb, cb);
	fence = check->fence;

	if (atomic_dec_and_test(&fence->status))
		wake_up_all(&fence->wq);
}

/* TODO: implement a create which takes more than one sync_pt */
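/*
 * sync_fence_create() - creates a fence containing a single sync point
 *
 * The fence starts with status == 1; if the callback cannot be added
 * because the point has already signaled, the status is dropped to 0
 * immediately.
 *
 * Illustrative driver-side sketch (names are hypothetical and error
 * handling is omitted; not part of this file):
 *
 *	struct sync_pt *pt = sync_pt_create(my_timeline, sizeof(*pt));
 *	struct sync_fence *f = sync_fence_create("my_fence", pt);
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	sync_fence_install(f, fd);	// hand fd to userspace
 */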
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	atomic_set(&fence->status, 1);

	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

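/*
 * sync_fence_fdget()/sync_fence_put()/sync_fence_install() manage the
 * file-descriptor view of a fence.  fdget() takes a file reference and
 * validates that the fd really is a sync fence by checking f_op; put()
 * drops the file reference; install() publishes the fence's file on an
 * already-reserved fd.
 */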
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

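/*
 * sync_fence_add_pt() - adds one constituent fence to a merged fence
 *
 * The slot counter *i is only advanced when fence_add_callback()
 * succeeds, i.e. when the point has not already signaled; already
 * signaled points are simply left out and accounted for later by
 * adjusting the merged fence's status.
 */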
static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}

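/*
 * sync_fence_merge() - creates a fence containing the union of two
 * fences' sync points
 *
 * Both inputs keep their points sorted by fence context, so this is a
 * two-pointer list merge.  When both fences carry a point on the same
 * context, only the later one (by seqno, compared with wraparound in
 * mind) is kept, since signaling it implies the earlier one.
 */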
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);

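/*
 * sync_fence_wake_up_wq() - waitqueue callback for async waiters
 *
 * Runs when the fence's waitqueue is woken; removes the waiter from the
 * queue and invokes the user-supplied callback with the fence (stashed
 * in work.private) as context.
 */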
int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
				 int wake_flags, void *key)
{
	struct sync_fence_waiter *wait;

	wait = container_of(curr, struct sync_fence_waiter, work);
	list_del_init(&wait->work.task_list);

	wait->callback(wait->work.private, wait);
	return 1;
}

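/*
 * sync_fence_wait_async() - registers a callback to run once the fence
 * signals
 *
 * Returns 1 if the fence has already signaled (the callback will not
 * run), a negative error if the fence is in an error state, or 0 if the
 * waiter was queued.  The status is re-checked under the waitqueue lock
 * so a fence that signals during registration is not missed.
 */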
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	waiter->work.private = fence;

	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fence->wq.lock, flags);
	if (!list_empty(&waiter->work.task_list))
		list_del_init(&waiter->work.task_list);
	else
		ret = -ENOENT;
	spin_unlock_irqrestore(&fence->wq.lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

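/*
 * sync_fence_wait() - blocks until the fence signals or the timeout
 * expires
 * @timeout:	timeout in milliseconds; a negative value waits forever
 *
 * Returns 0 on success, -ETIME on timeout, -ERESTARTSYS if interrupted,
 * or the fence's negative error status.  Timeouts and errors also dump
 * the sync debugfs state to aid debugging.
 */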
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0) {
		return ret;
	} else if (ret == 0) {
		if (timeout) {
			pr_info("fence timeout on [%pK] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%pK]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);

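/*
 * The android_fence_* functions below adapt the driver-facing
 * sync_timeline_ops interface onto struct fence_ops, so that sync
 * points participate in the generic fence machinery (callbacks,
 * default waits, debug strings).
 */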
static const char *android_fence_get_driver_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->ops->driver_name;
}

static const char *android_fence_get_timeline_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->name;
}

static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	sync_timeline_put(parent);
	fence_free(&pt->base);
}

static bool android_fence_signaled(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	int ret;

	ret = parent->ops->has_signaled(pt);
	if (ret < 0)
		fence->status = ret;
	return ret;
}

static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}

static void android_fence_disable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);

	list_del_init(&pt->active_list);
}

static int android_fence_fill_driver_data(struct fence *fence,
					  void *data, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->fill_driver_data)
		return 0;
	return parent->ops->fill_driver_data(pt, data, size);
}

static void android_fence_value_str(struct fence *fence,
				    char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->pt_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->pt_value_str(pt, str, size);
}

static void android_fence_timeline_value_str(struct fence *fence,
					     char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->timeline_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->timeline_value_str(parent, str, size);
}

static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	.disable_signaling = android_fence_disable_signaling,
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};

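/*
 * sync_fence_free() - kref release handler for a sync_fence
 *
 * Removes this fence's callback from every constituent fence and drops
 * the references the sync_fence held on them.
 */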
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i;

	for (i = 0; i < fence->num_fences; ++i) {
		fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_debug_remove(fence);

	kref_put(&fence->kref, sync_fence_free);
	return 0;
}

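/*
 * sync_fence_poll() - poll() support for fence fds
 *
 * Reports POLLIN once every constituent fence has signaled, POLLERR if
 * the fence is in an error state, and no events while it is pending.
 */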
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;
	int status;

	poll_wait(file, &fence->wq, wait);

	status = atomic_read(&fence->status);

	if (!status)
		return POLLIN;
	else if (status < 0)
		return POLLERR;
	return 0;
}

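/*
 * The three SYNC_IOC_* ioctls below are the userspace interface: WAIT
 * blocks with a millisecond timeout, MERGE combines two fence fds into
 * a new fd, and FENCE_INFO reports per-sync_pt status into a
 * caller-sized buffer.
 */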
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

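/*
 * sync_fill_pt_info() - serializes one sync point into a sync_pt_info
 * record, including any driver-specific payload
 *
 * Returns the number of bytes written, or -ENOMEM if the remaining
 * buffer cannot hold even the fixed-size header.
 */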
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (fence->ops->fill_driver_data) {
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = atomic_read(&fence->status);
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};