• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2013 Google, Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14 
15 #include <linux/bitops.h>
16 #include <linux/circ_buf.h>
17 #include <linux/fs.h>
18 #include <linux/module.h>
19 #include <linux/poll.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 
23 #include <video/adf_client.h>
24 #include <video/adf_format.h>
25 
26 #include "sw_sync.h"
27 #include "sync.h"
28 
29 #include "adf.h"
30 #include "adf_fops.h"
31 #include "adf_sysfs.h"
32 
33 #ifdef CONFIG_COMPAT
34 #include "adf_fops32.h"
35 #endif
36 
/**
 * adf_obj_set_event - enable or disable delivery of one event type
 * @obj: ADF object (device, interface, or overlay engine)
 * @file: per-open-file state holding the event subscription bitmap
 * @arg: userspace pointer to a struct adf_set_event
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, a negative error
 * if the object does not support the event type, or -EALREADY if the
 * subscription was already in the requested state.
 */
static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
		struct adf_set_event __user *arg)
{
	struct adf_set_event data;
	bool enable;
	bool was_enabled;
	unsigned long flags;
	int err;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	err = adf_obj_check_supports_event(obj, data.type);
	if (err < 0)
		return err;

	/*
	 * Normalize the user-supplied flag to a strict 0/1 before comparing
	 * it with the old bit value below.  Without this, userspace passing
	 * e.g. enabled == 2 would compare unequal to a true bit, skip the
	 * -EALREADY bail-out, and take an extra adf_event_get(), unbalancing
	 * the event refcount.
	 */
	enable = !!data.enabled;

	spin_lock_irqsave(&obj->file_lock, flags);
	if (enable)
		was_enabled = test_and_set_bit(data.type,
				file->event_subscriptions);
	else
		was_enabled = test_and_clear_bit(data.type,
				file->event_subscriptions);
	spin_unlock_irqrestore(&obj->file_lock, flags);

	if (enable == was_enabled)
		return -EALREADY;

	/* balance the refcount outside the spinlock, as before */
	if (enable)
		adf_event_get(obj, data.type);
	else
		adf_event_put(obj, data.type);

	return 0;
}
71 
/**
 * adf_obj_copy_custom_data_to_user - copy driver-private data to userspace
 * @obj: ADF object whose custom_data op supplies the data
 * @dst: userspace destination buffer
 * @dst_size: in: capacity of @dst; out: size reported by the driver
 *
 * Returns 0 (a no-op) when the object has no custom_data op.  At most
 * min(*dst_size, reported size) bytes are copied, and never more than
 * ADF_MAX_CUSTOM_DATA_SIZE even if a buggy driver reports a larger size.
 */
static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
		void __user *dst, size_t *dst_size)
{
	void *custom_data;
	size_t custom_data_size;
	int ret;

	if (!obj->ops || !obj->ops->custom_data) {
		dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
		return 0;
	}

	custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
	if (!custom_data)
		return -ENOMEM;

	ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
	if (ret < 0)
		goto done;

	/*
	 * Defensive clamp: the temporary buffer is only
	 * ADF_MAX_CUSTOM_DATA_SIZE bytes, so a driver op that reports a
	 * larger size must not cause an out-of-bounds read below
	 * (*dst_size is user-controlled and may be arbitrarily large).
	 */
	if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE)
		custom_data_size = ADF_MAX_CUSTOM_DATA_SIZE;

	if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
		ret = -EFAULT;
		goto done;
	}
	*dst_size = custom_data_size;

done:
	kfree(custom_data);
	return ret;
}
102 
/**
 * adf_eng_get_data - handle the ADF_GET_OVERLAY_ENGINE_DATA ioctl
 * @eng: overlay engine to describe
 * @arg: userspace adf_overlay_engine_data to fill
 *
 * Copies the engine's name, as many supported formats as the caller
 * provided room for, and driver-private custom data back to userspace.
 * The true supported-format count is always reported in
 * n_supported_formats so callers can size a retry.
 */
static int adf_eng_get_data(struct adf_overlay_engine *eng,
		struct adf_overlay_engine_data __user *arg)
{
	struct adf_device *dev = adf_overlay_engine_parent(eng);
	struct adf_overlay_engine_data data;
	size_t n_supported_formats;
	u32 *supported_formats = NULL;
	int ret = 0;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	strlcpy(data.name, eng->base.name, sizeof(data.name));

	if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
		return -EINVAL;

	/* remember how many slots the caller offered; report the real count */
	n_supported_formats = data.n_supported_formats;
	data.n_supported_formats = eng->ops->n_supported_formats;

	if (n_supported_formats) {
		supported_formats = kzalloc(n_supported_formats *
				sizeof(supported_formats[0]), GFP_KERNEL);
		if (!supported_formats)
			return -ENOMEM;

		/*
		 * Copy only inside this branch: the previous unconditional
		 * memcpy passed a NULL destination (with length 0) whenever
		 * the caller asked for no formats, which is undefined
		 * behavior per the C standard.
		 */
		memcpy(supported_formats, eng->ops->supported_formats,
				sizeof(u32) * min(n_supported_formats,
						eng->ops->n_supported_formats));
	}

	mutex_lock(&dev->client_lock);
	ret = adf_obj_copy_custom_data_to_user(&eng->base, data.custom_data,
			&data.custom_data_size);
	mutex_unlock(&dev->client_lock);

	if (ret < 0)
		goto done;

	if (copy_to_user(arg, &data, sizeof(data))) {
		ret = -EFAULT;
		goto done;
	}

	if (supported_formats && copy_to_user(data.supported_formats,
			supported_formats,
			n_supported_formats * sizeof(supported_formats[0])))
		ret = -EFAULT;

done:
	kfree(supported_formats);
	return ret;
}
156 
/*
 * Translate a userspace adf_buffer_config into a kernel adf_buffer,
 * taking a reference on each dma-buf plane and, optionally, on the
 * acquire sync fence.  On failure all references taken so far are
 * dropped via adf_buffer_cleanup(); on success the caller owns them.
 */
static int adf_buffer_import(struct adf_device *dev,
		struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
{
	struct adf_buffer_config user_buf;
	size_t i;
	int ret = 0;

	if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
		return -EFAULT;

	/* start from a zeroed buffer so the error-path cleanup is safe
	 * even when we bail out before all planes are imported */
	memset(buf, 0, sizeof(*buf));

	if (user_buf.n_planes > ADF_MAX_PLANES) {
		dev_err(&dev->base.dev, "invalid plane count %u\n",
				user_buf.n_planes);
		return -EINVAL;
	}

	/* user-supplied id -> overlay engine; borrowed pointer, no ref taken */
	buf->overlay_engine = idr_find(&dev->overlay_engines,
			user_buf.overlay_engine);
	if (!buf->overlay_engine) {
		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
				user_buf.overlay_engine);
		return -ENOENT;
	}

	buf->w = user_buf.w;
	buf->h = user_buf.h;
	buf->format = user_buf.format;
	for (i = 0; i < user_buf.n_planes; i++) {
		buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
		if (IS_ERR(buf->dma_bufs[i])) {
			ret = PTR_ERR(buf->dma_bufs[i]);
			dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
					user_buf.fd[i], ret);
			/* clear the ERR_PTR so cleanup won't put() it */
			buf->dma_bufs[i] = NULL;
			goto done;
		}
		buf->offset[i] = user_buf.offset[i];
		buf->pitch[i] = user_buf.pitch[i];
	}
	/* set only after all planes imported successfully */
	buf->n_planes = user_buf.n_planes;

	/* a negative acquire_fence fd means "no fence" */
	if (user_buf.acquire_fence >= 0) {
		buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
		if (!buf->acquire_fence) {
			dev_err(&dev->base.dev, "getting fence fd %d failed\n",
					user_buf.acquire_fence);
			ret = -EINVAL;
			goto done;
		}
	}

done:
	if (ret < 0)
		adf_buffer_cleanup(buf);
	return ret;
}
215 
/**
 * adf_device_post_config - handle the ADF_POST_CONFIG ioctl
 * @dev: ADF device receiving the post
 * @arg: userspace adf_post_config describing interfaces, buffers, and
 *       driver-private data
 *
 * Validates and imports the userspace configuration, posts it to the
 * device, and returns a completion-fence fd to userspace via
 * arg->complete_fence.  On success the posted buffers are owned by the
 * device core; on failure every imported reference is released and the
 * reserved fd is returned to the pool.
 */
static int adf_device_post_config(struct adf_device *dev,
		struct adf_post_config __user *arg)
{
	struct sync_fence *complete_fence;
	int complete_fence_fd;
	struct adf_buffer *bufs = NULL;
	struct adf_interface **intfs = NULL;
	struct adf_post_config data;
	size_t i;
	void *custom_data = NULL;
	int ret = 0;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	/*
	 * Reserve the completion-fence fd up front so a late failure only
	 * needs put_unused_fd().  Use get_unused_fd_flags(O_CLOEXEC) for
	 * consistency with adf_intf_simple_buffer_alloc(); the bare
	 * get_unused_fd() helper is deprecated and was removed upstream.
	 */
	complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
	if (complete_fence_fd < 0)
		return complete_fence_fd;

	/* bound all user-supplied counts before sizing allocations */
	if (data.n_interfaces > ADF_MAX_INTERFACES) {
		ret = -EINVAL;
		goto err_get_user;
	}

	if (data.n_bufs > ADF_MAX_BUFFERS) {
		ret = -EINVAL;
		goto err_get_user;
	}

	if (data.custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
		ret = -EINVAL;
		goto err_get_user;
	}

	if (data.n_interfaces) {
		intfs = kmalloc(sizeof(intfs[0]) * data.n_interfaces,
			GFP_KERNEL);
		if (!intfs) {
			ret = -ENOMEM;
			goto err_get_user;
		}
	}

	for (i = 0; i < data.n_interfaces; i++) {
		u32 intf_id;
		if (get_user(intf_id, &data.interfaces[i])) {
			ret = -EFAULT;
			goto err_get_user;
		}

		intfs[i] = idr_find(&dev->interfaces, intf_id);
		if (!intfs[i]) {
			ret = -EINVAL;
			goto err_get_user;
		}
	}

	/* zeroed so adf_buffer_cleanup() is safe on never-imported slots */
	if (data.n_bufs) {
		bufs = kzalloc(sizeof(bufs[0]) * data.n_bufs, GFP_KERNEL);
		if (!bufs) {
			ret = -ENOMEM;
			goto err_get_user;
		}
	}

	for (i = 0; i < data.n_bufs; i++) {
		ret = adf_buffer_import(dev, &data.bufs[i], &bufs[i]);
		if (ret < 0) {
			memset(&bufs[i], 0, sizeof(bufs[i]));
			goto err_import;
		}
	}

	if (data.custom_data_size) {
		custom_data = kzalloc(data.custom_data_size, GFP_KERNEL);
		if (!custom_data) {
			ret = -ENOMEM;
			goto err_import;
		}

		if (copy_from_user(custom_data, data.custom_data,
				data.custom_data_size)) {
			ret = -EFAULT;
			goto err_import;
		}
	}

	/* publish the fd number before the post so userspace sees it */
	if (put_user(complete_fence_fd, &arg->complete_fence)) {
		ret = -EFAULT;
		goto err_import;
	}

	/* "nocopy" post: ownership of bufs' references passes to the core */
	complete_fence = adf_device_post_nocopy(dev, intfs, data.n_interfaces,
			bufs, data.n_bufs, custom_data, data.custom_data_size);
	if (IS_ERR(complete_fence)) {
		ret = PTR_ERR(complete_fence);
		goto err_import;
	}

	sync_fence_install(complete_fence, complete_fence_fd);
	return 0;

err_import:
	for (i = 0; i < data.n_bufs; i++)
		adf_buffer_cleanup(&bufs[i]);

err_get_user:
	kfree(custom_data);
	kfree(bufs);
	kfree(intfs);
	put_unused_fd(complete_fence_fd);
	return ret;
}
329 
/**
 * adf_intf_simple_post_config - handle the ADF_SIMPLE_POST_CONFIG ioctl
 * @intf: interface to post the single buffer to
 * @arg: userspace adf_simple_post_config holding the buffer config and
 *       receiving the completion-fence fd
 *
 * Imports one buffer, posts it via the interface's simple path, and
 * hands a completion-fence fd back through arg->complete_fence.
 */
static int adf_intf_simple_post_config(struct adf_interface *intf,
		struct adf_simple_post_config __user *arg)
{
	struct adf_device *dev = intf->base.parent;
	struct sync_fence *complete_fence;
	int complete_fence_fd;
	struct adf_buffer buf;
	int ret = 0;

	/*
	 * Reserve the fence fd first so failures are cheap to unwind.
	 * O_CLOEXEC matches adf_intf_simple_buffer_alloc(); the bare
	 * get_unused_fd() helper is deprecated and was removed upstream.
	 */
	complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
	if (complete_fence_fd < 0)
		return complete_fence_fd;

	ret = adf_buffer_import(dev, &arg->buf, &buf);
	if (ret < 0)
		goto err_import;

	if (put_user(complete_fence_fd, &arg->complete_fence)) {
		ret = -EFAULT;
		goto err_put_user;
	}

	complete_fence = adf_interface_simple_post(intf, &buf);
	if (IS_ERR(complete_fence)) {
		ret = PTR_ERR(complete_fence);
		goto err_put_user;
	}

	sync_fence_install(complete_fence, complete_fence_fd);
	return 0;

err_put_user:
	adf_buffer_cleanup(&buf);
err_import:
	put_unused_fd(complete_fence_fd);
	return ret;
}
367 
/*
 * Handle the ADF_SIMPLE_BUFFER_ALLOC ioctl: allocate a scanout dma-buf
 * of the requested geometry and hand it to userspace as an fd, filling
 * in the offset and pitch the driver chose.
 */
static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
		struct adf_simple_buffer_alloc __user *arg)
{
	struct adf_simple_buffer_alloc params;
	struct dma_buf *buf;
	int err;

	if (copy_from_user(&params, arg, sizeof(params)))
		return -EFAULT;

	/* reserve the fd before allocating so failure unwinds cheaply */
	params.fd = get_unused_fd_flags(O_CLOEXEC);
	if (params.fd < 0)
		return params.fd;

	err = adf_interface_simple_buffer_alloc(intf, params.w, params.h,
			params.format, &buf, &params.offset, &params.pitch);
	if (err < 0) {
		put_unused_fd(params.fd);
		return err;
	}

	if (copy_to_user(arg, &params, sizeof(*arg))) {
		dma_buf_put(buf);
		put_unused_fd(params.fd);
		return -EFAULT;
	}

	/* point of no return: the fd now owns the dma-buf reference */
	fd_install(params.fd, buf->file);
	return 0;
}
402 
/**
 * adf_copy_attachment_list_to_user - export attachment pairs to userspace
 * @to: userspace array of adf_attachment_config
 * @n_to: capacity of @to in elements
 * @from: kernel attachment list
 * @n_from: number of valid entries in @from
 *
 * Copies min(@n_to, @n_from) (interface id, overlay engine id) pairs.
 * Returns 0 on success, -ENOMEM or -EFAULT on failure.
 */
static int adf_copy_attachment_list_to_user(
		struct adf_attachment_config __user *to, size_t n_to,
		struct adf_attachment *from, size_t n_from)
{
	struct adf_attachment_config *temp;
	size_t n = min(n_to, n_from);
	size_t i;
	int ret = 0;

	if (!n)
		return 0;

	/* bounce buffer: build the flat id pairs, then one copy_to_user */
	temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		temp[i].interface = from[i].interface->base.id;
		temp[i].overlay_engine = from[i].overlay_engine->base.id;
	}

	/* the original had a redundant "goto done" jumping to the very
	 * next statement; a plain assignment falls through identically */
	if (copy_to_user(to, temp, n * sizeof(to[0])))
		ret = -EFAULT;

	kfree(temp);
	return ret;
}
433 
/*
 * Handle the ADF_GET_DEVICE_DATA ioctl: report the device name, its
 * current and allowed attachment lists (bounded by the caller-supplied
 * counts), and driver-private custom data.  The true counts are written
 * back so userspace can size a retry.
 */
static int adf_device_get_data(struct adf_device *dev,
		struct adf_device_data __user *arg)
{
	struct adf_device_data data;
	size_t n_attach;
	struct adf_attachment *attach = NULL;
	size_t n_allowed_attach;
	struct adf_attachment *allowed_attach = NULL;
	int ret = 0;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	/* reject oversized user counts before they size allocations */
	if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
			data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
		return -EINVAL;

	strlcpy(data.name, dev->base.name, sizeof(data.name));

	if (data.n_attachments) {
		attach = kzalloc(data.n_attachments * sizeof(attach[0]),
				GFP_KERNEL);
		if (!attach)
			return -ENOMEM;
	}
	/* attach may be NULL here when n_attachments == 0; presumably
	 * adf_device_attachments() copies nothing then -- verify callee */
	n_attach = adf_device_attachments(dev, attach, data.n_attachments);

	if (data.n_allowed_attachments) {
		allowed_attach = kzalloc(data.n_allowed_attachments *
				sizeof(allowed_attach[0]), GFP_KERNEL);
		if (!allowed_attach) {
			ret = -ENOMEM;
			goto done;
		}
	}
	n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
			data.n_allowed_attachments);

	/* custom_data op runs under the device's client lock */
	mutex_lock(&dev->client_lock);
	ret = adf_obj_copy_custom_data_to_user(&dev->base, data.custom_data,
			&data.custom_data_size);
	mutex_unlock(&dev->client_lock);

	if (ret < 0)
		goto done;

	ret = adf_copy_attachment_list_to_user(data.attachments,
			data.n_attachments, attach, n_attach);
	if (ret < 0)
		goto done;

	ret = adf_copy_attachment_list_to_user(data.allowed_attachments,
			data.n_allowed_attachments, allowed_attach,
			n_allowed_attach);
	if (ret < 0)
		goto done;

	/* report the real counts, which may exceed what was copied */
	data.n_attachments = n_attach;
	data.n_allowed_attachments = n_allowed_attach;

	if (copy_to_user(arg, &data, sizeof(data)))
		ret = -EFAULT;

done:
	kfree(allowed_attach);
	kfree(attach);
	return ret;
}
502 
/*
 * Shared handler for the ADF_ATTACH and ADF_DETACH ioctls: look up the
 * overlay engine and interface named in @arg and attach or detach them
 * according to @attach.
 */
static int adf_device_handle_attachment(struct adf_device *dev,
		struct adf_attachment_config __user *arg, bool attach)
{
	struct adf_attachment_config config;
	struct adf_overlay_engine *engine;
	struct adf_interface *interface;

	if (copy_from_user(&config, arg, sizeof(config)))
		return -EFAULT;

	engine = idr_find(&dev->overlay_engines, config.overlay_engine);
	if (!engine) {
		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
				config.overlay_engine);
		return -EINVAL;
	}

	interface = idr_find(&dev->interfaces, config.interface);
	if (!interface) {
		dev_err(&dev->base.dev, "invalid interface id %u\n",
				config.interface);
		return -EINVAL;
	}

	return attach ? adf_device_attach(dev, engine, interface) :
			adf_device_detach(dev, engine, interface);
}
532 
/* Handle the ADF_SET_MODE ioctl: copy the requested display mode from
 * userspace and apply it to the interface. */
static int adf_intf_set_mode(struct adf_interface *intf,
		struct drm_mode_modeinfo __user *arg)
{
	struct drm_mode_modeinfo requested_mode;

	if (copy_from_user(&requested_mode, arg, sizeof(requested_mode)))
		return -EFAULT;

	return adf_interface_set_mode(intf, &requested_mode);
}
543 
/*
 * Handle the ADF_GET_INTERFACE_DATA ioctl: report the interface's name,
 * type, flags, physical size, hotplug state, mode list (bounded by the
 * caller-supplied count), current mode, DPMS state, and driver-private
 * custom data.
 */
static int adf_intf_get_data(struct adf_interface *intf,
		struct adf_interface_data __user *arg)
{
	struct adf_device *dev = adf_interface_parent(intf);
	struct adf_interface_data data;
	struct drm_mode_modeinfo *modelist;
	size_t modelist_size;
	int err;
	int ret = 0;
	unsigned long flags;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	strlcpy(data.name, intf->base.name, sizeof(data.name));

	data.type = intf->type;
	data.id = intf->idx;
	data.flags = intf->flags;

	/* screen size is optional; report 0x0 when the driver can't say */
	err = adf_interface_get_screen_size(intf, &data.width_mm,
			&data.height_mm);
	if (err < 0) {
		data.width_mm = 0;
		data.height_mm = 0;
	}

	/* worst-case bounce buffer so the modelist can be snapshotted
	 * under the spinlock and copied to userspace outside it */
	modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
	if (!modelist)
		return -ENOMEM;

	/* lock order: client_lock (mutex) outside hotplug_modelist_lock;
	 * the rwlock snapshot keeps hotplug state and modes consistent */
	mutex_lock(&dev->client_lock);
	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
	data.hotplug_detect = intf->hotplug_detect;
	modelist_size = min(data.n_available_modes, intf->n_modes) *
			sizeof(intf->modelist[0]);
	memcpy(modelist, intf->modelist, modelist_size);
	data.n_available_modes = intf->n_modes;
	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);

	if (copy_to_user(data.available_modes, modelist, modelist_size)) {
		ret = -EFAULT;
		goto done;
	}

	data.dpms_state = intf->dpms_state;
	memcpy(&data.current_mode, &intf->current_mode,
			sizeof(intf->current_mode));

	ret = adf_obj_copy_custom_data_to_user(&intf->base, data.custom_data,
			&data.custom_data_size);
done:
	/* client_lock is still held on both paths reaching here */
	mutex_unlock(&dev->client_lock);
	kfree(modelist);

	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &data, sizeof(data)))
		ret = -EFAULT;

	return ret;
}
607 
adf_obj_custom_ioctl(struct adf_obj * obj,unsigned int cmd,unsigned long arg)608 static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
609 		unsigned long arg)
610 {
611 	if (obj->ops && obj->ops->ioctl)
612 		return obj->ops->ioctl(obj, cmd, arg);
613 	return -ENOTTY;
614 }
615 
adf_overlay_engine_ioctl(struct adf_overlay_engine * eng,struct adf_file * file,unsigned int cmd,unsigned long arg)616 static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
617 		struct adf_file *file, unsigned int cmd, unsigned long arg)
618 {
619 	switch (cmd) {
620 	case ADF_SET_EVENT:
621 		return adf_obj_set_event(&eng->base, file,
622 				(struct adf_set_event __user *)arg);
623 
624 	case ADF_GET_OVERLAY_ENGINE_DATA:
625 		return adf_eng_get_data(eng,
626 			(struct adf_overlay_engine_data __user *)arg);
627 
628 	case ADF_BLANK:
629 	case ADF_POST_CONFIG:
630 	case ADF_SET_MODE:
631 	case ADF_GET_DEVICE_DATA:
632 	case ADF_GET_INTERFACE_DATA:
633 	case ADF_SIMPLE_POST_CONFIG:
634 	case ADF_SIMPLE_BUFFER_ALLOC:
635 	case ADF_ATTACH:
636 	case ADF_DETACH:
637 		return -EINVAL;
638 
639 	default:
640 		return adf_obj_custom_ioctl(&eng->base, cmd, arg);
641 	}
642 }
643 
adf_interface_ioctl(struct adf_interface * intf,struct adf_file * file,unsigned int cmd,unsigned long arg)644 static long adf_interface_ioctl(struct adf_interface *intf,
645 		struct adf_file *file, unsigned int cmd, unsigned long arg)
646 {
647 	switch (cmd) {
648 	case ADF_SET_EVENT:
649 		return adf_obj_set_event(&intf->base, file,
650 				(struct adf_set_event __user *)arg);
651 
652 	case ADF_BLANK:
653 		return adf_interface_blank(intf, arg);
654 
655 	case ADF_SET_MODE:
656 		return adf_intf_set_mode(intf,
657 				(struct drm_mode_modeinfo __user *)arg);
658 
659 	case ADF_GET_INTERFACE_DATA:
660 		return adf_intf_get_data(intf,
661 				(struct adf_interface_data __user *)arg);
662 
663 	case ADF_SIMPLE_POST_CONFIG:
664 		return adf_intf_simple_post_config(intf,
665 				(struct adf_simple_post_config __user *)arg);
666 
667 	case ADF_SIMPLE_BUFFER_ALLOC:
668 		return adf_intf_simple_buffer_alloc(intf,
669 				(struct adf_simple_buffer_alloc __user *)arg);
670 
671 	case ADF_POST_CONFIG:
672 	case ADF_GET_DEVICE_DATA:
673 	case ADF_GET_OVERLAY_ENGINE_DATA:
674 	case ADF_ATTACH:
675 	case ADF_DETACH:
676 		return -EINVAL;
677 
678 	default:
679 		return adf_obj_custom_ioctl(&intf->base, cmd, arg);
680 	}
681 }
682 
adf_device_ioctl(struct adf_device * dev,struct adf_file * file,unsigned int cmd,unsigned long arg)683 static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
684 		unsigned int cmd, unsigned long arg)
685 {
686 	switch (cmd) {
687 	case ADF_SET_EVENT:
688 		return adf_obj_set_event(&dev->base, file,
689 				(struct adf_set_event __user *)arg);
690 
691 	case ADF_POST_CONFIG:
692 		return adf_device_post_config(dev,
693 				(struct adf_post_config __user *)arg);
694 
695 	case ADF_GET_DEVICE_DATA:
696 		return adf_device_get_data(dev,
697 				(struct adf_device_data __user *)arg);
698 
699 	case ADF_ATTACH:
700 		return adf_device_handle_attachment(dev,
701 				(struct adf_attachment_config __user *)arg,
702 				true);
703 
704 	case ADF_DETACH:
705 		return adf_device_handle_attachment(dev,
706 				(struct adf_attachment_config __user *)arg,
707 				false);
708 
709 	case ADF_BLANK:
710 	case ADF_SET_MODE:
711 	case ADF_GET_INTERFACE_DATA:
712 	case ADF_GET_OVERLAY_ENGINE_DATA:
713 	case ADF_SIMPLE_POST_CONFIG:
714 	case ADF_SIMPLE_BUFFER_ALLOC:
715 		return -EINVAL;
716 
717 	default:
718 		return adf_obj_custom_ioctl(&dev->base, cmd, arg);
719 	}
720 }
721 
/*
 * open() handler shared by all ADF nodes: resolve the adf_obj from the
 * char-device minor, pin the owning driver module, allocate per-open
 * state (struct adf_file), give the driver's open hook a chance to
 * veto, and link the file into the object's reader list.
 */
static int adf_file_open(struct inode *inode, struct file *file)
{
	struct adf_obj *obj;
	struct adf_file *fpriv = NULL;
	unsigned long flags;
	int ret = 0;

	obj = adf_obj_sysfs_find(iminor(inode));
	if (!obj)
		return -ENODEV;

	dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));

	/* hold the driver module for the lifetime of this open; dropped
	 * in adf_file_release() or on the error path below */
	if (!try_module_get(obj->parent->ops->owner)) {
		dev_err(&obj->dev, "getting owner module failed\n");
		return -ENODEV;
	}

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv) {
		ret = -ENOMEM;
		goto done;
	}

	INIT_LIST_HEAD(&fpriv->head);
	fpriv->obj = obj;
	init_waitqueue_head(&fpriv->event_wait);

	/* published before the driver hook so the hook can use it */
	file->private_data = fpriv;

	if (obj->ops && obj->ops->open) {
		ret = obj->ops->open(obj, inode, file);
		if (ret < 0)
			goto done;
	}

	/* only a fully-initialized file joins the event delivery list */
	spin_lock_irqsave(&obj->file_lock, flags);
	list_add_tail(&fpriv->head, &obj->file_list);
	spin_unlock_irqrestore(&obj->file_lock, flags);

done:
	if (ret < 0) {
		kfree(fpriv);
		module_put(obj->parent->ops->owner);
	}
	return ret;
}
769 
/*
 * release() handler: mirror of adf_file_open().  Runs the driver's
 * release hook, unlinks the file from event delivery, drops one event
 * reference per still-subscribed event type, and unpins the driver
 * module.
 */
static int adf_file_release(struct inode *inode, struct file *file)
{
	struct adf_file *fpriv = file->private_data;
	struct adf_obj *obj = fpriv->obj;
	enum adf_event_type event_type;
	unsigned long flags;

	if (obj->ops && obj->ops->release)
		obj->ops->release(obj, inode, file);

	spin_lock_irqsave(&obj->file_lock, flags);
	list_del(&fpriv->head);
	spin_unlock_irqrestore(&obj->file_lock, flags);

	/* balance the adf_event_get() taken in adf_obj_set_event() for
	 * every subscription this file never turned off */
	for_each_set_bit(event_type, fpriv->event_subscriptions,
			ADF_EVENT_TYPE_MAX) {
		adf_event_put(obj, event_type);
	}

	kfree(fpriv);
	module_put(obj->parent->ops->owner);

	dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
	return 0;
}
795 
adf_file_ioctl(struct file * file,unsigned int cmd,unsigned long arg)796 long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
797 {
798 	struct adf_file *fpriv = file->private_data;
799 	struct adf_obj *obj = fpriv->obj;
800 	long ret = -EINVAL;
801 
802 	dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
803 
804 	switch (obj->type) {
805 	case ADF_OBJ_OVERLAY_ENGINE:
806 		ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
807 				fpriv, cmd, arg);
808 		break;
809 
810 	case ADF_OBJ_INTERFACE:
811 		ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
812 				arg);
813 		break;
814 
815 	case ADF_OBJ_DEVICE:
816 		ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
817 		break;
818 	}
819 
820 	return ret;
821 }
822 
adf_file_event_available(struct adf_file * fpriv)823 static inline bool adf_file_event_available(struct adf_file *fpriv)
824 {
825 	int head = fpriv->event_head;
826 	int tail = fpriv->event_tail;
827 	return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
828 }
829 
/*
 * Append @event (header + payload, event->length bytes total) to
 * @fpriv's circular event buffer and wake any blocked readers.  If the
 * buffer lacks space the event is silently dropped with only a debug
 * log.  NOTE(review): head/tail appear to be protected by
 * obj->file_lock on the reader side; presumably callers hold it here
 * too -- verify against the call sites in adf.c.
 */
void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
{
	int head = fpriv->event_head;
	int tail = fpriv->event_tail;
	size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
	size_t space_to_end =
			CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));

	if (space < event->length) {
		dev_dbg(&fpriv->obj->dev,
				"insufficient buffer space for event %u\n",
				event->type);
		return;
	}

	/* write in one or two chunks depending on wrap-around */
	if (space_to_end >= event->length) {
		memcpy(fpriv->event_buf + head, event, event->length);
	} else {
		memcpy(fpriv->event_buf + head, event, space_to_end);
		memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
				event->length - space_to_end);
	}

	/* publish the event bytes before advancing head (circ_buf
	 * producer ordering); the mask assumes the buffer size is a
	 * power of two, as circ_buf requires */
	smp_wmb();
	fpriv->event_head = (fpriv->event_head + event->length) &
			(sizeof(fpriv->event_buf) - 1);
	wake_up_interruptible_all(&fpriv->event_wait);
}
858 
/*
 * Drain up to @buffer_size bytes of queued event data into @buffer.
 * The circular buffer is snapshotted into a kernel bounce buffer under
 * obj->file_lock (copy_to_user may fault and so cannot run under the
 * spinlock), then copied out.  Returns the number of bytes copied, 0
 * if no event was available, or a negative errno.
 */
static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
		char __user *buffer, size_t buffer_size)
{
	int head, tail;
	u8 *event_buf;
	size_t cnt, cnt_to_end, copy_size = 0;
	ssize_t ret = 0;
	unsigned long flags;

	/* bounce buffer never needs to exceed the ring or the request */
	event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
			GFP_KERNEL);
	if (!event_buf)
		return -ENOMEM;

	spin_lock_irqsave(&fpriv->obj->file_lock, flags);

	if (!adf_file_event_available(fpriv))
		goto out;

	head = fpriv->event_head;
	tail = fpriv->event_tail;

	cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
	cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
	copy_size = min(buffer_size, cnt);

	/* read in one or two chunks depending on wrap-around */
	if (cnt_to_end >= copy_size) {
		memcpy(event_buf, fpriv->event_buf + tail, copy_size);
	} else {
		memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
		memcpy(event_buf + cnt_to_end, fpriv->event_buf,
				copy_size - cnt_to_end);
	}

	/* consume under the lock; mask assumes power-of-two buffer size */
	fpriv->event_tail = (fpriv->event_tail + copy_size) &
			(sizeof(fpriv->event_buf) - 1);

out:
	spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
	if (copy_size) {
		if (copy_to_user(buffer, event_buf, copy_size))
			ret = -EFAULT;
		else
			ret = copy_size;
	}
	kfree(event_buf);
	return ret;
}
907 
adf_file_read(struct file * filp,char __user * buffer,size_t count,loff_t * offset)908 ssize_t adf_file_read(struct file *filp, char __user *buffer,
909 		 size_t count, loff_t *offset)
910 {
911 	struct adf_file *fpriv = filp->private_data;
912 	int err;
913 
914 	err = wait_event_interruptible(fpriv->event_wait,
915 			adf_file_event_available(fpriv));
916 	if (err < 0)
917 		return err;
918 
919 	return adf_file_copy_to_user(fpriv, buffer, count);
920 }
921 
adf_file_poll(struct file * filp,struct poll_table_struct * wait)922 unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
923 {
924 	struct adf_file *fpriv = filp->private_data;
925 	unsigned int mask = 0;
926 
927 	poll_wait(filp, &fpriv->event_wait, wait);
928 
929 	if (adf_file_event_available(fpriv))
930 		mask |= POLLIN | POLLRDNORM;
931 
932 	return mask;
933 }
934 
/* File operations shared by every ADF node (device, interface, and
 * overlay engine); per-object dispatch happens inside each handler via
 * file->private_data (struct adf_file). */
const struct file_operations adf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_file_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit compatibility shim lives in adf_fops32.c */
	.compat_ioctl = adf_file_compat_ioctl,
#endif
	.open = adf_file_open,
	.release = adf_file_release,
	.llseek = default_llseek,
	.read = adf_file_read,
	.poll = adf_file_poll,
};
947