/*
 * Copyright (C) 2013 Google, Inc.
 * adf_modeinfo_{set_name,set_vrefresh} modified from
 * drivers/gpu/drm/drm_modes.c
 * adf_format_validate_yuv modified from framebuffer_check in
 * drivers/gpu/drm/drm_crtc.c
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <video/adf_format.h>

#include "sw_sync.h"
#include "sync.h"

#include "adf.h"
#include "adf_fops.h"
#include "adf_sysfs.h"

#define CREATE_TRACE_POINTS
#include "adf_trace.h"

#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)

static DEFINE_IDR(adf_devices);

static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
{
        /*
         * sync_fence_wait() dumps debug information on timeout.  Experience
         * has shown that if the pipeline gets stuck, a short timeout followed
         * by a longer one provides useful information for debugging.
         */
        int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
        if (err >= 0)
                return;

        if (err == -ETIME)
                err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);

        if (err < 0)
                dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
}

void adf_buffer_cleanup(struct adf_buffer *buf)
{
        size_t i;
        for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
                if (buf->dma_bufs[i])
                        dma_buf_put(buf->dma_bufs[i]);

        if (buf->acquire_fence)
                sync_fence_put(buf->acquire_fence);
}

void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
                struct adf_buffer *buf)
{
        /*
         * Calling adf_buffer_mapping_cleanup() is safe even if mapping is
         * uninitialized or partially-initialized, as long as it was
         * zeroed on allocation.
         */
        size_t i;
        for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
                if (mapping->sg_tables[i])
                        dma_buf_unmap_attachment(mapping->attachments[i],
                                        mapping->sg_tables[i], DMA_TO_DEVICE);
                if (mapping->attachments[i])
                        dma_buf_detach(buf->dma_bufs[i],
                                        mapping->attachments[i]);
        }
}

void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
{
        size_t i;

        if (post->state)
                dev->ops->state_free(dev, post->state);

        for (i = 0; i < post->config.n_bufs; i++) {
                adf_buffer_mapping_cleanup(&post->config.mappings[i],
                                &post->config.bufs[i]);
                adf_buffer_cleanup(&post->config.bufs[i]);
        }

        kfree(post->config.custom_data);
        kfree(post->config.mappings);
        kfree(post->config.bufs);
        kfree(post);
}

static void adf_sw_advance_timeline(struct adf_device *dev)
{
#ifdef CONFIG_SW_SYNC
        sw_sync_timeline_inc(dev->timeline, 1);
#else
        BUG();
#endif
}

static void adf_post_work_func(struct kthread_work *work)
{
        struct adf_device *dev =
                        container_of(work, struct adf_device, post_work);
        struct adf_pending_post *post, *next;
        struct list_head saved_list;

        mutex_lock(&dev->post_lock);
        memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
        list_replace_init(&dev->post_list, &saved_list);
        mutex_unlock(&dev->post_lock);

        list_for_each_entry_safe(post, next, &saved_list, head) {
                int i;

                for (i = 0; i < post->config.n_bufs; i++) {
                        struct sync_fence *fence =
                                        post->config.bufs[i].acquire_fence;
                        if (fence)
                                adf_fence_wait(dev, fence);
                }

                dev->ops->post(dev, &post->config, post->state);

                if (dev->ops->advance_timeline)
                        dev->ops->advance_timeline(dev, &post->config,
                                        post->state);
                else
                        adf_sw_advance_timeline(dev);

                list_del(&post->head);
                if (dev->onscreen)
                        adf_post_cleanup(dev, dev->onscreen);
                dev->onscreen = post;
        }
}

void adf_attachment_free(struct adf_attachment_list *attachment)
{
        list_del(&attachment->head);
        kfree(attachment);
}

struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
                enum adf_event_type type)
{
        struct rb_root *root = &obj->event_refcount;
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;
        struct adf_event_refcount *refcount;

        while (*new) {
                refcount = container_of(*new, struct adf_event_refcount, node);
                parent = *new;

                if (refcount->type > type)
                        new = &(*new)->rb_left;
                else if (refcount->type < type)
                        new = &(*new)->rb_right;
                else
                        return refcount;
        }

        refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
        if (!refcount)
                return NULL;
        refcount->type = type;

        rb_link_node(&refcount->node, parent, new);
        rb_insert_color(&refcount->node, root);
        return refcount;
}

/**
 * adf_event_get - increase the refcount for an event
 *
 * @obj: the object that produces the event
 * @type: the event type
 *
 * ADF will call the object's set_event() op if needed.  ops are allowed
 * to sleep, so adf_event_get() must NOT be called from an atomic context.
 *
 * Returns 0 if successful, -%EINVAL if the object does not support the
 * requested event type, or -%ENOMEM if the refcount could not be allocated.
 */
int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
{
        struct adf_event_refcount *refcount;
        int old_refcount;
        int ret;

        ret = adf_obj_check_supports_event(obj, type);
        if (ret < 0)
                return ret;

        mutex_lock(&obj->event_lock);

        refcount = adf_obj_find_event_refcount(obj, type);
        if (!refcount) {
                ret = -ENOMEM;
                goto done;
        }

        old_refcount = refcount->refcount++;

        if (old_refcount == 0) {
                obj->ops->set_event(obj, type, true);
                trace_adf_event_enable(obj, type);
        }

done:
        mutex_unlock(&obj->event_lock);
        return ret;
}
EXPORT_SYMBOL(adf_event_get);

/**
 * adf_event_put - decrease the refcount for an event
 *
 * @obj: the object that produces the event
 * @type: the event type
 *
 * ADF will call the object's set_event() op if needed.  ops are allowed
 * to sleep, so adf_event_put() must NOT be called from an atomic context.
 *
 * Returns 0 if successful, -%EINVAL if the object does not support the
 * requested event type, -%ENOMEM if the refcount could not be allocated,
 * or -%EALREADY if the refcount is already 0.
 */
int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
{
        struct adf_event_refcount *refcount;
        int old_refcount;
        int ret;

        ret = adf_obj_check_supports_event(obj, type);
        if (ret < 0)
                return ret;

        mutex_lock(&obj->event_lock);

        refcount = adf_obj_find_event_refcount(obj, type);
        if (!refcount) {
                ret = -ENOMEM;
                goto done;
        }

        old_refcount = refcount->refcount--;

        if (WARN_ON(old_refcount == 0)) {
                refcount->refcount++;
                ret = -EALREADY;
        } else if (old_refcount == 1) {
                obj->ops->set_event(obj, type, false);
                trace_adf_event_disable(obj, type);
        }

done:
        mutex_unlock(&obj->event_lock);
        return ret;
}
EXPORT_SYMBOL(adf_event_put);
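
/*
 * Usage sketch (editor's addition, not part of the original driver): a
 * client that wants vsync events only while it is listening would bracket
 * the subscription with adf_event_get()/adf_event_put().  The helper
 * names below are hypothetical.
 *
 *      static int client_subscribe_vsync(struct adf_interface *intf)
 *      {
 *              return adf_event_get(&intf->base, ADF_EVENT_VSYNC);
 *      }
 *
 *      static void client_unsubscribe_vsync(struct adf_interface *intf)
 *      {
 *              adf_event_put(&intf->base, ADF_EVENT_VSYNC);
 *      }
 *
 * Both helpers may sleep, so they must be called from process context.
 */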

/**
 * adf_vsync_wait - wait for a vsync event on a display interface
 *
 * @intf: the display interface
 * @timeout: timeout in milliseconds (0 = wait indefinitely)
 *
 * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
 *
 * This function returns -%ERESTARTSYS if it is interrupted by a signal.
 * If @timeout == 0 then this function returns 0 on vsync.  If @timeout > 0
 * then this function returns the number of remaining jiffies or -%ETIMEDOUT
 * on timeout.
 */
int adf_vsync_wait(struct adf_interface *intf, long timeout)
{
        ktime_t timestamp;
        int ret;
        unsigned long flags;

        read_lock_irqsave(&intf->vsync_lock, flags);
        timestamp = intf->vsync_timestamp;
        read_unlock_irqrestore(&intf->vsync_lock, flags);

        adf_vsync_get(intf);
        if (timeout) {
                ret = wait_event_interruptible_timeout(intf->vsync_wait,
                                !ktime_equal(timestamp,
                                                intf->vsync_timestamp),
                                msecs_to_jiffies(timeout));
                if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
                        ret = -ETIMEDOUT;
        } else {
                ret = wait_event_interruptible(intf->vsync_wait,
                                !ktime_equal(timestamp,
                                                intf->vsync_timestamp));
        }
        adf_vsync_put(intf);

        return ret;
}
EXPORT_SYMBOL(adf_vsync_wait);
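
/*
 * Usage sketch (editor's addition): wait up to two 60Hz frames (~32ms)
 * for the next vsync before giving up.  "intf" is assumed to be a
 * registered interface whose driver signals vsync via adf_vsync_notify().
 *
 *      int ret = adf_vsync_wait(intf, 32);
 *
 *      if (ret == -ETIMEDOUT)
 *              dev_warn(&intf->base.dev, "vsync timed out\n");
 *      else if (ret < 0)
 *              return ret;     (e.g. -ERESTARTSYS on signal)
 */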

static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
{
        struct adf_file *file;
        unsigned long flags;

        trace_adf_event(obj, event->type);

        spin_lock_irqsave(&obj->file_lock, flags);

        list_for_each_entry(file, &obj->file_list, head)
                if (test_bit(event->type, file->event_subscriptions))
                        adf_file_queue_event(file, event);

        spin_unlock_irqrestore(&obj->file_lock, flags);
}

/**
 * adf_event_notify - notify userspace of a driver-private event
 *
 * @obj: the ADF object that produced the event
 * @event: the event
 *
 * adf_event_notify() may be called safely from an atomic context.  It will
 * copy @event if needed, so @event may point to a variable on the stack.
 *
 * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
 * ADF provides adf_vsync_notify() and
 * adf_hotplug_notify_{connected,disconnected}() for these events.
 */
int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
{
        if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
                        event->type == ADF_EVENT_HOTPLUG))
                return -EINVAL;

        adf_event_queue(obj, event);
        return 0;
}
EXPORT_SYMBOL(adf_event_notify);

/**
 * adf_vsync_notify - notify ADF of a display interface's vsync event
 *
 * @intf: the display interface
 * @timestamp: the time the vsync occurred
 *
 * adf_vsync_notify() may be called safely from an atomic context.
 */
void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
{
        unsigned long flags;
        struct adf_vsync_event event;

        write_lock_irqsave(&intf->vsync_lock, flags);
        intf->vsync_timestamp = timestamp;
        write_unlock_irqrestore(&intf->vsync_lock, flags);

        wake_up_interruptible_all(&intf->vsync_wait);

        event.base.type = ADF_EVENT_VSYNC;
        event.base.length = sizeof(event);
        event.timestamp = ktime_to_ns(timestamp);
        adf_event_queue(&intf->base, &event.base);
}
EXPORT_SYMBOL(adf_vsync_notify);

void adf_hotplug_notify(struct adf_interface *intf, bool connected,
                struct drm_mode_modeinfo *modelist, size_t n_modes)
{
        unsigned long flags;
        struct adf_hotplug_event event;
        struct drm_mode_modeinfo *old_modelist;

        write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
        old_modelist = intf->modelist;
        intf->hotplug_detect = connected;
        intf->modelist = modelist;
        intf->n_modes = n_modes;
        write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);

        kfree(old_modelist);

        event.base.length = sizeof(event);
        event.base.type = ADF_EVENT_HOTPLUG;
        event.connected = connected;
        adf_event_queue(&intf->base, &event.base);
}

/**
 * adf_hotplug_notify_connected - notify ADF of a display interface being
 * connected to a display
 *
 * @intf: the display interface
 * @modelist: hardware modes supported by display
 * @n_modes: length of modelist
 *
 * @modelist is copied as needed, so it may point to a variable on the stack.
 *
 * adf_hotplug_notify_connected() may NOT be called safely from an atomic
 * context.
 *
 * Returns 0 on success or error code (<0) on error.
 */
int adf_hotplug_notify_connected(struct adf_interface *intf,
                struct drm_mode_modeinfo *modelist, size_t n_modes)
{
        struct drm_mode_modeinfo *modelist_copy;

        if (n_modes > ADF_MAX_MODES)
                return -ENOMEM;

        modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
                        GFP_KERNEL);
        if (!modelist_copy)
                return -ENOMEM;
        memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);

        adf_hotplug_notify(intf, true, modelist_copy, n_modes);
        return 0;
}
EXPORT_SYMBOL(adf_hotplug_notify_connected);
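
/*
 * Usage sketch (editor's addition): a hypothetical HDMI driver reporting
 * a freshly parsed EDID modelist from its hotplug worker (process context,
 * since the modelist copy allocates with GFP_KERNEL).
 *
 *      struct drm_mode_modeinfo modes[N];
 *
 *      ...fill in modes[] from the EDID, then for each mode:
 *
 *      adf_modeinfo_set_name(&modes[i]);
 *      adf_modeinfo_set_vrefresh(&modes[i]);
 *
 *      err = adf_hotplug_notify_connected(intf, modes, ARRAY_SIZE(modes));
 *
 * The modelist is copied internally, so modes[] may live on the stack.
 */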

/**
 * adf_hotplug_notify_disconnected - notify ADF of a display interface being
 * disconnected from a display
 *
 * @intf: the display interface
 *
 * adf_hotplug_notify_disconnected() may be called safely from an atomic
 * context.
 */
void adf_hotplug_notify_disconnected(struct adf_interface *intf)
{
        adf_hotplug_notify(intf, false, NULL, 0);
}
EXPORT_SYMBOL(adf_hotplug_notify_disconnected);

static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
                struct idr *idr, struct adf_device *parent,
                const struct adf_obj_ops *ops, const char *fmt, va_list args)
{
        int ret;

        if (ops && ops->supports_event && !ops->set_event) {
                pr_err("%s: %s implements supports_event but not set_event\n",
                                __func__, adf_obj_type_str(type));
                return -EINVAL;
        }

        ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                pr_err("%s: allocating object id failed: %d\n", __func__, ret);
                return ret;
        }
        obj->id = ret;

        vscnprintf(obj->name, sizeof(obj->name), fmt, args);

        obj->type = type;
        obj->ops = ops;
        obj->parent = parent;
        mutex_init(&obj->event_lock);
        obj->event_refcount = RB_ROOT;
        spin_lock_init(&obj->file_lock);
        INIT_LIST_HEAD(&obj->file_list);
        return 0;
}

static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
{
        struct rb_node *node = rb_first(&obj->event_refcount);

        while (node) {
                struct adf_event_refcount *refcount =
                                container_of(node, struct adf_event_refcount,
                                                node);
                rb_erase(&refcount->node, &obj->event_refcount);
                kfree(refcount);
                node = rb_first(&obj->event_refcount);
        }

        mutex_destroy(&obj->event_lock);
        idr_remove(idr, obj->id);
}

/**
 * adf_device_init - initialize ADF-internal data for a display device
 * and create sysfs entries
 *
 * @dev: the display device
 * @parent: the device's parent device
 * @ops: the device's associated ops
 * @fmt: formatting string for the display device's name
 *
 * @fmt specifies the device's sysfs filename and the name returned to
 * userspace through the %ADF_GET_DEVICE_DATA ioctl.
 *
 * Returns 0 on success or error code (<0) on failure.
 */
int adf_device_init(struct adf_device *dev, struct device *parent,
                const struct adf_device_ops *ops, const char *fmt, ...)
{
        int ret;
        va_list args;

        if (!ops->validate || !ops->post) {
                pr_err("%s: device must implement validate and post\n",
                                __func__);
                return -EINVAL;
        }

        if (!ops->complete_fence && !ops->advance_timeline) {
                if (!IS_ENABLED(CONFIG_SW_SYNC)) {
                        pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
                                        __func__);
                        return -EINVAL;
                }
        } else if (!(ops->complete_fence && ops->advance_timeline)) {
                pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
                                __func__);
                return -EINVAL;
        }

        memset(dev, 0, sizeof(*dev));

        va_start(args, fmt);
        ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
                        &ops->base, fmt, args);
        va_end(args);
        if (ret < 0)
                return ret;

        dev->dev = parent;
        dev->ops = ops;
        idr_init(&dev->overlay_engines);
        idr_init(&dev->interfaces);
        mutex_init(&dev->client_lock);
        INIT_LIST_HEAD(&dev->post_list);
        mutex_init(&dev->post_lock);
        init_kthread_worker(&dev->post_worker);
        INIT_LIST_HEAD(&dev->attached);
        INIT_LIST_HEAD(&dev->attach_allowed);

        dev->post_thread = kthread_run(kthread_worker_fn,
                        &dev->post_worker, dev->base.name);
        if (IS_ERR(dev->post_thread)) {
                ret = PTR_ERR(dev->post_thread);
                dev->post_thread = NULL;

                pr_err("%s: failed to run config posting thread: %d\n",
                                __func__, ret);
                goto err;
        }
        init_kthread_work(&dev->post_work, adf_post_work_func);

        ret = adf_device_sysfs_init(dev);
        if (ret < 0)
                goto err;

        return 0;

err:
        adf_device_destroy(dev);
        return ret;
}
EXPORT_SYMBOL(adf_device_init);
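
/*
 * Usage sketch (editor's addition): minimal bring-up from a hypothetical
 * driver's probe function.  "struct myhw_device" (embedding a struct
 * adf_device named "base") and "myhw_dev_ops" are assumptions; the ops
 * must implement at least validate and post.
 *
 *      static int myhw_probe(struct platform_device *pdev)
 *      {
 *              struct myhw_device *myhw;
 *
 *              myhw = devm_kzalloc(&pdev->dev, sizeof(*myhw), GFP_KERNEL);
 *              if (!myhw)
 *                      return -ENOMEM;
 *
 *              return adf_device_init(&myhw->base, &pdev->dev,
 *                              &myhw_dev_ops, "myhw.%d", pdev->id);
 *      }
 *
 * On failure adf_device_init() tears down whatever it set up, so the
 * caller only calls adf_device_destroy() after a successful init.
 */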

/**
 * adf_device_destroy - clean up ADF-internal data for a display device
 *
 * @dev: the display device
 */
void adf_device_destroy(struct adf_device *dev)
{
        struct adf_attachment_list *entry, *next;

        idr_destroy(&dev->interfaces);
        idr_destroy(&dev->overlay_engines);

        if (dev->post_thread) {
                flush_kthread_worker(&dev->post_worker);
                kthread_stop(dev->post_thread);
        }

        if (dev->onscreen)
                adf_post_cleanup(dev, dev->onscreen);
        adf_device_sysfs_destroy(dev);
        list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
                adf_attachment_free(entry);
        }
        list_for_each_entry_safe(entry, next, &dev->attached, head) {
                adf_attachment_free(entry);
        }
        mutex_destroy(&dev->post_lock);
        mutex_destroy(&dev->client_lock);

        if (dev->timeline)
                sync_timeline_destroy(&dev->timeline->obj);

        adf_obj_destroy(&dev->base, &adf_devices);
}
EXPORT_SYMBOL(adf_device_destroy);

/**
 * adf_interface_init - initialize ADF-internal data for a display interface
 * and create sysfs entries
 *
 * @intf: the display interface
 * @dev: the interface's "parent" display device
 * @type: interface type (see enum @adf_interface_type)
 * @idx: which interface of type @type;
 *	e.g. interface DSI.1 -> @type=%ADF_INTF_DSI, @idx=1
 * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
 * @ops: the interface's associated ops
 * @fmt: formatting string for the display interface's name
 *
 * @dev must have previously been initialized with adf_device_init().
 *
 * @fmt affects the name returned to userspace through the
 * %ADF_GET_INTERFACE_DATA ioctl.  It does not affect the sysfs filename,
 * which is derived from @dev's name.
 *
 * Returns 0 on success or error code (<0) on failure.
 */
int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
                enum adf_interface_type type, u32 idx, u32 flags,
                const struct adf_interface_ops *ops, const char *fmt, ...)
{
        int ret;
        va_list args;
        const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
                        ADF_INTF_FLAG_EXTERNAL;

        if (dev->n_interfaces == ADF_MAX_INTERFACES) {
                pr_err("%s: parent device %s has too many interfaces\n",
                                __func__, dev->base.name);
                return -ENOMEM;
        }

        if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
                pr_err("%s: invalid interface type %u\n", __func__, type);
                return -EINVAL;
        }

        if (flags & ~allowed_flags) {
                pr_err("%s: invalid interface flags 0x%X\n", __func__,
                                flags & ~allowed_flags);
                return -EINVAL;
        }

        memset(intf, 0, sizeof(*intf));

        va_start(args, fmt);
        ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
                        dev, ops ? &ops->base : NULL, fmt, args);
        va_end(args);
        if (ret < 0)
                return ret;

        intf->type = type;
        intf->idx = idx;
        intf->flags = flags;
        intf->ops = ops;
        intf->dpms_state = DRM_MODE_DPMS_OFF;
        init_waitqueue_head(&intf->vsync_wait);
        rwlock_init(&intf->vsync_lock);
        rwlock_init(&intf->hotplug_modelist_lock);

        ret = adf_interface_sysfs_init(intf);
        if (ret < 0)
                goto err;
        dev->n_interfaces++;

        return 0;

err:
        adf_obj_destroy(&intf->base, &dev->interfaces);
        return ret;
}
EXPORT_SYMBOL(adf_interface_init);
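
/*
 * Usage sketch (editor's addition): registering the primary DSI panel of
 * the hypothetical "myhw" device from the adf_device_init() example.
 *
 *      err = adf_interface_init(&myhw->intf, &myhw->base, ADF_INTF_DSI,
 *                      0, ADF_INTF_FLAG_PRIMARY, &myhw_intf_ops,
 *                      "myhw-dsi.%u", 0);
 */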

/**
 * adf_interface_destroy - clean up ADF-internal data for a display interface
 *
 * @intf: the display interface
 */
void adf_interface_destroy(struct adf_interface *intf)
{
        struct adf_device *dev = adf_interface_parent(intf);
        struct adf_attachment_list *entry, *next;

        mutex_lock(&dev->client_lock);
        list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
                if (entry->attachment.interface == intf) {
                        adf_attachment_free(entry);
                        dev->n_attach_allowed--;
                }
        }
        list_for_each_entry_safe(entry, next, &dev->attached, head) {
                if (entry->attachment.interface == intf) {
                        adf_device_detach_op(dev,
                                        entry->attachment.overlay_engine, intf);
                        adf_attachment_free(entry);
                        dev->n_attached--;
                }
        }
        kfree(intf->modelist);
        adf_interface_sysfs_destroy(intf);
        adf_obj_destroy(&intf->base, &dev->interfaces);
        dev->n_interfaces--;
        mutex_unlock(&dev->client_lock);
}
EXPORT_SYMBOL(adf_interface_destroy);

static bool adf_overlay_engine_has_custom_formats(
                const struct adf_overlay_engine_ops *ops)
{
        size_t i;
        for (i = 0; i < ops->n_supported_formats; i++)
                if (!adf_format_is_standard(ops->supported_formats[i]))
                        return true;
        return false;
}

/**
 * adf_overlay_engine_init - initialize ADF-internal data for an
 * overlay engine and create sysfs entries
 *
 * @eng: the overlay engine
 * @dev: the overlay engine's "parent" display device
 * @ops: the overlay engine's associated ops
 * @fmt: formatting string for the overlay engine's name
 *
 * @dev must have previously been initialized with adf_device_init().
 *
 * @fmt affects the name returned to userspace through the
 * %ADF_GET_OVERLAY_ENGINE_DATA ioctl.  It does not affect the sysfs
 * filename, which is derived from @dev's name.
 *
 * Returns 0 on success or error code (<0) on failure.
 */
int adf_overlay_engine_init(struct adf_overlay_engine *eng,
                struct adf_device *dev,
                const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
{
        int ret;
        va_list args;

        if (!ops->supported_formats) {
                pr_err("%s: overlay engine must support at least one format\n",
                                __func__);
                return -EINVAL;
        }

        if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
                pr_err("%s: overlay engine supports too many formats\n",
                                __func__);
                return -EINVAL;
        }

        if (adf_overlay_engine_has_custom_formats(ops) &&
                        !dev->ops->validate_custom_format) {
                pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
                                __func__, dev->base.name);
                return -EINVAL;
        }

        memset(eng, 0, sizeof(*eng));

        va_start(args, fmt);
        ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
                        &dev->overlay_engines, dev, &ops->base, fmt, args);
        va_end(args);
        if (ret < 0)
                return ret;

        eng->ops = ops;

        ret = adf_overlay_engine_sysfs_init(eng);
        if (ret < 0)
                goto err;

        return 0;

err:
        adf_obj_destroy(&eng->base, &dev->overlay_engines);
        return ret;
}
EXPORT_SYMBOL(adf_overlay_engine_init);
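
/*
 * Usage sketch (editor's addition): a single-format engine for the
 * hypothetical "myhw" device.  Only the format-table fields referenced
 * by this file are shown; real ops would fill in the rest.
 *
 *      static const u32 myhw_formats[] = { DRM_FORMAT_XRGB8888 };
 *
 *      static const struct adf_overlay_engine_ops myhw_eng_ops = {
 *              .supported_formats = myhw_formats,
 *              .n_supported_formats = ARRAY_SIZE(myhw_formats),
 *      };
 *
 *      err = adf_overlay_engine_init(&myhw->eng, &myhw->base,
 *                      &myhw_eng_ops, "myhw-eng.%u", 0);
 */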

/**
 * adf_overlay_engine_destroy - clean up ADF-internal data for an overlay
 * engine
 *
 * @eng: the overlay engine
 */
void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
{
        struct adf_device *dev = adf_overlay_engine_parent(eng);
        struct adf_attachment_list *entry, *next;

        mutex_lock(&dev->client_lock);
        list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
                if (entry->attachment.overlay_engine == eng) {
                        adf_attachment_free(entry);
                        dev->n_attach_allowed--;
                }
        }
        list_for_each_entry_safe(entry, next, &dev->attached, head) {
                if (entry->attachment.overlay_engine == eng) {
                        adf_device_detach_op(dev, eng,
                                        entry->attachment.interface);
                        adf_attachment_free(entry);
                        dev->n_attached--;
                }
        }
        adf_overlay_engine_sysfs_destroy(eng);
        adf_obj_destroy(&eng->base, &dev->overlay_engines);
        mutex_unlock(&dev->client_lock);
}
EXPORT_SYMBOL(adf_overlay_engine_destroy);

struct adf_attachment_list *adf_attachment_find(struct list_head *list,
                struct adf_overlay_engine *eng, struct adf_interface *intf)
{
        struct adf_attachment_list *entry;
        list_for_each_entry(entry, list, head) {
                if (entry->attachment.interface == intf &&
                                entry->attachment.overlay_engine == eng)
                        return entry;
        }
        return NULL;
}

int adf_attachment_validate(struct adf_device *dev,
                struct adf_overlay_engine *eng, struct adf_interface *intf)
{
        struct adf_device *intf_dev = adf_interface_parent(intf);
        struct adf_device *eng_dev = adf_overlay_engine_parent(eng);

        if (intf_dev != dev) {
                dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
                                intf->base.name, intf_dev->base.name);
                return -EINVAL;
        }

        if (eng_dev != dev) {
                dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
                                eng->base.name, eng_dev->base.name);
                return -EINVAL;
        }

        return 0;
}

/**
 * adf_attachment_allow - add a new entry to the list of allowed
 * attachments
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 *
 * adf_attachment_allow() indicates that the underlying display hardware allows
 * @intf to scan out @eng's output.  It is intended to be called at
 * driver initialization for each supported overlay engine + interface pair.
 *
 * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
 * any other failure.
 */
int adf_attachment_allow(struct adf_device *dev,
                struct adf_overlay_engine *eng, struct adf_interface *intf)
{
        int ret;
        struct adf_attachment_list *entry = NULL;

        ret = adf_attachment_validate(dev, eng, intf);
        if (ret < 0)
                return ret;

        mutex_lock(&dev->client_lock);

        if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
                ret = -ENOMEM;
                goto done;
        }

        if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
                ret = -EALREADY;
                goto done;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                ret = -ENOMEM;
                goto done;
        }

        entry->attachment.interface = intf;
        entry->attachment.overlay_engine = eng;
        list_add_tail(&entry->head, &dev->attach_allowed);
        dev->n_attach_allowed++;

done:
        mutex_unlock(&dev->client_lock);
        if (ret < 0)
                kfree(entry);

        return ret;
}
EXPORT_SYMBOL(adf_attachment_allow);
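
/*
 * Usage sketch (editor's addition): continuing the hypothetical "myhw"
 * driver, declare at probe time that the engine may feed the DSI
 * interface.  The error label is hypothetical.
 *
 *      err = adf_attachment_allow(&myhw->base, &myhw->eng, &myhw->intf);
 *      if (err < 0 && err != -EALREADY)
 *              goto err_teardown;
 */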

/**
 * adf_obj_type_str - string representation of an adf_obj_type
 *
 * @type: the object type
 */
const char *adf_obj_type_str(enum adf_obj_type type)
{
        switch (type) {
        case ADF_OBJ_OVERLAY_ENGINE:
                return "overlay engine";

        case ADF_OBJ_INTERFACE:
                return "interface";

        case ADF_OBJ_DEVICE:
                return "device";

        default:
                return "unknown";
        }
}
EXPORT_SYMBOL(adf_obj_type_str);

/**
 * adf_interface_type_str - string representation of an adf_interface's type
 *
 * @intf: the interface
 */
const char *adf_interface_type_str(struct adf_interface *intf)
{
        switch (intf->type) {
        case ADF_INTF_DSI:
                return "DSI";

        case ADF_INTF_eDP:
                return "eDP";

        case ADF_INTF_DPI:
                return "DPI";

        case ADF_INTF_VGA:
                return "VGA";

        case ADF_INTF_DVI:
                return "DVI";

        case ADF_INTF_HDMI:
                return "HDMI";

        case ADF_INTF_MEMORY:
                return "memory";

        default:
                if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
                        if (intf->ops && intf->ops->type_str)
                                return intf->ops->type_str(intf);
                        return "custom";
                }
                return "unknown";
        }
}
EXPORT_SYMBOL(adf_interface_type_str);

/**
 * adf_event_type_str - string representation of an adf_event_type
 *
 * @obj: ADF object that produced the event
 * @type: event type
 */
const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
{
        switch (type) {
        case ADF_EVENT_VSYNC:
                return "vsync";

        case ADF_EVENT_HOTPLUG:
                return "hotplug";

        default:
                if (type >= ADF_EVENT_DEVICE_CUSTOM) {
                        if (obj->ops && obj->ops->event_type_str)
                                return obj->ops->event_type_str(obj, type);
                        return "custom";
                }
                return "unknown";
        }
}
EXPORT_SYMBOL(adf_event_type_str);

/**
 * adf_format_str - string representation of an ADF/DRM fourcc format
 *
 * @format: format fourcc
 * @buf: target buffer for the format's string representation
 */
void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
{
        buf[0] = format & 0xFF;
        buf[1] = (format >> 8) & 0xFF;
        buf[2] = (format >> 16) & 0xFF;
        buf[3] = (format >> 24) & 0xFF;
        buf[4] = '\0';
}
EXPORT_SYMBOL(adf_format_str);
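
/*
 * Usage sketch (editor's addition): decoding a fourcc for a log message.
 *
 *      char buf[ADF_FORMAT_STR_SIZE];
 *
 *      adf_format_str(DRM_FORMAT_NV12, buf);
 *      pr_debug("posting %s buffer\n", buf);   (prints "posting NV12 buffer")
 */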

/**
 * adf_format_validate_yuv - validate the number and size of planes in buffers
 * with a custom YUV format.
 *
 * @dev: ADF device performing the validation
 * @buf: buffer to validate
 * @num_planes: expected number of planes
 * @hsub: expected horizontal chroma subsampling factor, in pixels
 * @vsub: expected vertical chroma subsampling factor, in pixels
 * @cpp: expected bytes per pixel for each plane (length @num_planes)
 *
 * adf_format_validate_yuv() is intended to be called as a helper from @dev's
 * validate_custom_format() op.
 *
 * Returns 0 if @buf has the expected number of planes and each plane
 * has sufficient size, or -EINVAL otherwise.
 */
int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
                u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
{
        u8 i;

        if (num_planes != buf->n_planes) {
                char format_str[ADF_FORMAT_STR_SIZE];
                adf_format_str(buf->format, format_str);
                dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
                                num_planes, format_str, buf->n_planes);
                return -EINVAL;
        }

        if (buf->w == 0 || buf->w % hsub) {
                dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
                return -EINVAL;
        }

        if (buf->h == 0 || buf->h % vsub) {
                dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
                return -EINVAL;
        }

        for (i = 0; i < num_planes; i++) {
                u32 width = buf->w / (i != 0 ? hsub : 1);
                u32 height = buf->h / (i != 0 ? vsub : 1);
                u8 cpp = adf_format_plane_cpp(buf->format, i);
                u32 last_line_size;

                if (buf->pitch[i] < (u64) width * cpp) {
                        dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
                                        i, buf->pitch[i], width, cpp * 8);
                        return -EINVAL;
                }

                switch (dev->ops->quirks.buffer_padding) {
                case ADF_BUFFER_PADDED_TO_PITCH:
                        last_line_size = buf->pitch[i];
                        break;

                case ADF_BUFFER_UNPADDED:
                        last_line_size = width * cpp;
                        break;

                default:
                        BUG();
                }

                if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
                                buf->offset[i] > buf->dma_bufs[i]->size) {
                        dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
                                        i, height, buf->pitch[i],
                                        buf->offset[i], buf->dma_bufs[i]->size);
                        return -EINVAL;
                }
        }

        return 0;
}
EXPORT_SYMBOL(adf_format_validate_yuv);
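
/*
 * Usage sketch (editor's addition): a validate_custom_format() op for a
 * hypothetical two-plane, 2x2-subsampled NV12-style custom format.  The
 * op's (device, buffer) signature is an assumption inferred from how the
 * helper is described above.
 *
 *      static int myhw_validate_custom_format(struct adf_device *dev,
 *                      struct adf_buffer *buf)
 *      {
 *              u8 cpp[] = { 1, 2 };    (bytes per pixel: Y plane, CbCr plane)
 *
 *              return adf_format_validate_yuv(dev, buf, ARRAY_SIZE(cpp),
 *                              2, 2, cpp);
 *      }
 */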

/**
 * adf_modeinfo_set_name - sets the name of a mode from its display resolution
 *
 * @mode: mode
 *
 * adf_modeinfo_set_name() fills in @mode->name in the format
 * "[hdisplay]x[vdisplay](i)".  It is intended to help drivers create
 * ADF/DRM-style modelists from other mode formats.
 */
void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
{
        bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;

        snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
                 mode->hdisplay, mode->vdisplay,
                 interlaced ? "i" : "");
}
EXPORT_SYMBOL(adf_modeinfo_set_name);

/**
 * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
 * timing data
 *
 * @mode: mode
 *
 * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from @mode->clock,
 * @mode->{h,v}total, and @mode->flags.  It is intended to help drivers
 * create ADF/DRM-style modelists from other mode formats.
 */
void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
{
        int refresh = 0;
        unsigned int calc_val;

        if (mode->vrefresh > 0)
                return;

        if (mode->htotal <= 0 || mode->vtotal <= 0)
                return;

        /* work out vrefresh; the intermediate value is scaled by 1000 */
        calc_val = (mode->clock * 1000);
        calc_val /= mode->htotal;
        refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                refresh *= 2;
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                refresh /= 2;
        if (mode->vscan > 1)
                refresh /= mode->vscan;

        mode->vrefresh = refresh;
}
EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
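
/*
 * Worked example (editor's addition): for a 1080p60 CEA mode with
 * clock = 148500 (kHz), htotal = 2200 and vtotal = 1125:
 *
 *      calc_val = 148500 * 1000 / 2200      = 67500
 *      vrefresh = (67500 + 1125 / 2) / 1125 = 60
 */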

static int __init adf_init(void)
{
        int err;

        err = adf_sysfs_init();
        if (err < 0)
                return err;

        return 0;
}

static void __exit adf_exit(void)
{
        adf_sysfs_destroy();
}

module_init(adf_init);
module_exit(adf_exit);