/*
 * Broadcom BCM2835 V4L2 driver
 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 *
 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
 *          Dave Stevenson <dsteve@broadcom.com>
 *          Simon Mellor <simellor@broadcom.com>
 *          Luke Diamand <luked@broadcom.com>
 *
 * V4L2 driver MMAL vchiq interface code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/btree.h>
#include <asm/cacheflush.h>
#include <media/videobuf2-vmalloc.h>

#include "mmal-common.h"
#include "mmal-vchiq.h"
#include "mmal-msg.h"

#define USE_VCHIQ_ARM
#include "interface/vchi/vchi.h"

/* maximum number of components supported */
#define VCHIQ_MMAL_MAX_COMPONENTS 4

/*#define FULL_MSG_DUMP 1*/

#ifdef DEBUG
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif

static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};

#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
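
/* Example (debug builds only): dump an incoming reply before dispatch,
 * as done in service_callback() below:
 *
 *	DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
 *
 * With FULL_MSG_DUMP also defined the header and payload are hex-dumped
 * as well; otherwise only the message type and length are logged.
 */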

struct vchiq_mmal_instance;

/* normal message context */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;
	u32 handle;

	union {
		struct {
			/* work struct for deferred callback - must come first */
			struct work_struct work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			VCHI_HELD_MSG_T msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;
};

struct vchiq_mmal_context_map {
	/* ensure serialised access to the btree (contention should be low) */
	struct mutex lock;
	struct btree_head32 btree_head;
	u32 last_handle;
};

struct vchiq_mmal_instance {
	VCHI_SERVICE_HANDLE_T handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* ensure serialised access to bulk operations */
	struct mutex bulk_mutex;

	/* vmalloc page to receive scratch bulk xfers into */
	void *bulk_scratch;

	/* mapping table between context handles and mmal_msg_contexts */
	struct vchiq_mmal_context_map context_map;

	/* component to use next */
	int component_idx;
	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
};

static int __must_check
mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
{
	mutex_init(&context_map->lock);
	context_map->last_handle = 0;
	return btree_init32(&context_map->btree_head);
}

static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
{
	mutex_lock(&context_map->lock);
	btree_destroy32(&context_map->btree_head);
	mutex_unlock(&context_map->lock);
}

static u32
mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
			       struct mmal_msg_context *msg_context,
			       gfp_t gfp)
{
	u32 handle;

	mutex_lock(&context_map->lock);

	while (1) {
		/* just use a simple count for handles, but do not use 0 */
		context_map->last_handle++;
		if (!context_map->last_handle)
			context_map->last_handle++;

		handle = context_map->last_handle;

		/* check if the handle is already in use */
		if (!btree_lookup32(&context_map->btree_head, handle))
			break;
	}

	if (btree_insert32(&context_map->btree_head, handle,
			   msg_context, gfp)) {
		/* probably out of memory */
		mutex_unlock(&context_map->lock);
		return 0;
	}

	mutex_unlock(&context_map->lock);
	return handle;
}
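
/* Handles are allocated from a simple incrementing 32-bit counter: 0 is
 * reserved as the "no handle" value, the counter is allowed to wrap, and
 * the btree lookup above skips any value still in use, so a collision is
 * only possible once 2^32 handles have been issued while an old context
 * is still outstanding.
 */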

static struct mmal_msg_context *
mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
			       u32 handle)
{
	struct mmal_msg_context *msg_context;

	if (!handle)
		return NULL;

	mutex_lock(&context_map->lock);

	msg_context = btree_lookup32(&context_map->btree_head, handle);

	mutex_unlock(&context_map->lock);
	return msg_context;
}

static void
mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
				u32 handle)
{
	mutex_lock(&context_map->lock);
	btree_remove32(&context_map->btree_head, handle);
	mutex_unlock(&context_map->lock);
}

static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{
	struct mmal_msg_context *msg_context;

	/* todo: should this be allocated from a pool to avoid kzalloc */
	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);

	if (!msg_context)
		return ERR_PTR(-ENOMEM);

	msg_context->instance = instance;
	msg_context->handle =
		mmal_context_map_create_handle(&instance->context_map,
					       msg_context,
					       GFP_KERNEL);

	if (!msg_context->handle) {
		kfree(msg_context);
		return ERR_PTR(-ENOMEM);
	}

	return msg_context;
}

static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
{
	return mmal_context_map_lookup_handle(&instance->context_map,
		handle);
}

static void
release_msg_context(struct mmal_msg_context *msg_context)
{
	mmal_context_map_destroy_handle(&msg_context->instance->context_map,
					msg_context->handle);
	kfree(msg_context);
}

/* deals with receipt of event to host message */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}

/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer,
					    msg_context->u.bulk.buffer_used,
					    msg_context->u.bulk.mmal_flags,
					    msg_context->u.bulk.dts,
					    msg_context->u.bulk.pts);

	/* release message context */
	release_msg_context(msg_context);
}

/* enqueue a bulk receive for a given message context */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;
	unsigned long flags = 0;
	int ret;

	/* bulk mutex stops other bulk operations while we have a
	 * receive in progress - released in callback
	 */
	ret = mutex_lock_interruptible(&instance->bulk_mutex);
	if (ret != 0)
		return ret;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	/* take buffer from queue */
	spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
	if (list_empty(&msg_context->u.bulk.port->buffers)) {
		spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
		pr_err("buffer list empty trying to submit bulk receive\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal service going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		mutex_unlock(&instance->bulk_mutex);

		return -EINVAL;
	}

	msg_context->u.bulk.buffer =
	    list_entry(msg_context->u.bulk.port->buffers.next,
		       struct mmal_buffer, list);
	list_del(&msg_context->u.bulk.buffer->list);

	spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.mmal_flags =
	    msg->u.buffer_from_host.buffer_header.flags;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	/* queue the bulk submission */
	vchi_service_use(instance->handle);
	ret = vchi_bulk_queue_receive(instance->handle,
				      msg_context->u.bulk.buffer->buffer,
				      /* Actual receive needs to be a multiple
				       * of 4 bytes
				       */
				      (rd_len + 3) & ~3,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	if (ret != 0) {
		/* callback will not be clearing the mutex */
		mutex_unlock(&instance->bulk_mutex);
	}

	return ret;
}
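
/* Note on the length rounding above: the bulk receive must be a multiple
 * of 4 bytes, so (rd_len + 3) & ~3 rounds up, e.g. a 613 byte payload is
 * queued as a 616 byte transfer, while buffer_used still records the
 * true rd_len.
 */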

/* enqueue a dummy bulk receive for a given message context */
static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
			      struct mmal_msg_context *msg_context)
{
	int ret;

	/* bulk mutex stops other bulk operations while we have a
	 * receive in progress - released in callback
	 */
	ret = mutex_lock_interruptible(&instance->bulk_mutex);
	if (ret != 0)
		return ret;

	/* zero length indicates this was a dummy transfer */
	msg_context->u.bulk.buffer_used = 0;

	/* queue the bulk submission */
	vchi_service_use(instance->handle);

	ret = vchi_bulk_queue_receive(instance->handle,
				      instance->bulk_scratch,
				      8,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	if (ret != 0) {
		/* callback will not be clearing the mutex */
		mutex_unlock(&instance->bulk_mutex);
	}

	return ret;
}

/* data in message, memcpy from packet into output buffer */
static int inline_receive(struct vchiq_mmal_instance *instance,
			  struct mmal_msg *msg,
			  struct mmal_msg_context *msg_context)
{
	unsigned long flags = 0;

	/* take buffer from queue */
	spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
	if (list_empty(&msg_context->u.bulk.port->buffers)) {
		spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
		pr_err("buffer list empty trying to receive inline\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (with
		 * underflow handling) and there is no obvious way to
		 * deal with this. Less bad than the bulk case as we
		 * can just drop this on the floor but...unhelpful
		 */
		return -EINVAL;
	}

	msg_context->u.bulk.buffer =
	    list_entry(msg_context->u.bulk.port->buffers.next,
		       struct mmal_buffer, list);
	list_del(&msg_context->u.bulk.buffer->list);

	spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);

	memcpy(msg_context->u.bulk.buffer->buffer,
	       msg->u.buffer_from_host.short_data,
	       msg->u.buffer_from_host.payload_in_message);

	msg_context->u.bulk.buffer_used =
	    msg->u.buffer_from_host.payload_in_message;

	return 0;
}

/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	pr_debug("instance:%p buffer:%p\n", instance, buf);

	/* bulk mutex stops other bulk operations while we
	 * have a receive in progress
	 */
	if (mutex_lock_interruptible(&instance->bulk_mutex))
		return -EINTR;

	/* get context */
	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context)) {
		ret = PTR_ERR(msg_context);
		goto unlock;
	}

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = NULL;	/* not valid until bulk xfer */
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					&m,
					sizeof(struct mmal_msg_header) +
					sizeof(m.u.buffer_from_host));

	if (ret != 0) {
		release_msg_context(msg_context);
		/* todo: is this correct error value? */
	}

	vchi_service_release(instance->handle);

unlock:
	mutex_unlock(&instance->bulk_mutex);

	return ret;
}
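
/* buffer_from_host() only advertises an empty buffer to the VPU; no
 * payload travels with this message. The data comes back later in a
 * MMAL_MSG_TYPE_BUFFER_TO_HOST message handled by buffer_to_host_cb()
 * below, either inline, via a bulk transfer, or as an empty/EOS buffer.
 */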

/* submit a buffer to the mmal service
 *
 * the buffer_from_host uses size data from the port's next available
 * mmal_buffer and deals with there being no buffer available by
 * incrementing the underflow for later
 */
static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_buffer *buf;
	unsigned long flags = 0;

	if (!port->enabled)
		return -EINVAL;

	/* peek buffer from queue */
	spin_lock_irqsave(&port->slock, flags);
	if (list_empty(&port->buffers)) {
		port->buffer_underflow++;
		spin_unlock_irqrestore(&port->slock, flags);
		return -ENOSPC;
	}

	buf = list_entry(port->buffers.next, struct mmal_buffer, list);

	spin_unlock_irqrestore(&port->slock, flags);

	/* issue buffer to mmal service */
	ret = buffer_from_host(instance, port, buf);
	if (ret) {
		pr_err("adding buffer header failed\n");
		/* todo: how should this be dealt with */
	}

	return ret;
}

/* deals with receipt of buffer to host message */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
		 instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			msg_context->u.bulk.status =
			    dummy_bulk_receive(instance, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* replace the buffer header */
	port_buffer_from_host(instance, msg_context->u.bulk.port);

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
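
/* Summary of the delivery paths handled above:
 *  - zero length with the EOS flag set: a dummy 8 byte bulk receive is
 *    queued so bulk completion ordering is preserved,
 *  - payload_in_message == 0: the payload follows as a bulk transfer,
 *  - payload_in_message <= MMAL_VC_SHORT_DATA: the payload was carried
 *    inline in short_data and is copied out immediately.
 * Whenever a bulk transfer was queued successfully the port callback is
 * deferred until bulk completion; otherwise it is scheduled right away.
 */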

static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	/* bulk receive operation complete */
	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);

	/* replace the buffer header */
	port_buffer_from_host(msg_context->u.bulk.instance,
			      msg_context->u.bulk.port);

	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}

static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	/* bulk receive operation complete */
	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);

	/* replace the buffer header */
	port_buffer_from_host(msg_context->u.bulk.instance,
			      msg_context->u.bulk.port);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}

/* incoming event service callback */
static void service_callback(void *param,
			     const VCHI_CALLBACK_REASON_T reason,
			     void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = param;
	int status;
	u32 msg_len;
	struct mmal_msg *msg;
	VCHI_HELD_MSG_T msg_handle;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return;
	}

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		status = vchi_msg_hold(instance->handle, (void **)&msg,
				       &msg_len, VCHI_FLAGS_NONE, &msg_handle);
		if (status) {
			pr_err("Unable to dequeue a message (%d)\n", status);
			break;
		}

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			vchi_held_msg_release(&msg_handle);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchi_held_msg_release(&msg_handle);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchi_held_msg_release(&msg_handle);
				break;
			}

			/* fill in context values */
			msg_context->u.sync.msg_handle = msg_handle;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHI_CALLBACK_BULK_RECEIVED:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}
}
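
/* service_callback() runs in vchiq's message delivery context, which is
 * why buffer completions are bounced through a workqueue rather than
 * calling back into vchiq synchronously from here (see the comment
 * above buffer_work_cb()).
 */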

static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     VCHI_HELD_MSG_T *msg_handle_out)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		      (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					msg,
					sizeof(struct mmal_msg_header) +
					payload_len);

	vchi_service_release(instance->handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      3 * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	*msg_out = msg_context->u.sync.msg;
	*msg_handle_out = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
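
/* Typical caller pattern (see create_component() and friends below):
 *
 *	struct mmal_msg m;
 *	struct mmal_msg *rmsg;
 *	VCHI_HELD_MSG_T rmsg_handle;
 *
 *	m.h.type = MMAL_MSG_TYPE_...;
 *	...fill in m.u for the chosen message type...
 *	ret = send_synchronous_mmal_msg(instance, &m, sizeof(m.u.xxx),
 *					&rmsg, &rmsg_handle);
 *
 * The reply is held rather than copied, so callers must release
 * rmsg_handle with vchi_held_msg_release() once done with rmsg.
 */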

static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d  aspect %d/%d\n",
			 port->es.video.frame_rate.num,
			 port->es.video.frame_rate.den,
			 port->es.video.par.num, port->es.video.par.den);
	}
}

static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo: do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	p->userdata = (u32)(unsigned long)port;
}

static int port_info_set(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	pr_debug("setting port info port %p\n", port);
	if (!port)
		return -EINVAL;
	dump_port_info(port);

	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;

	m.u.port_info_set.component_handle = port->component->handle;
	m.u.port_info_set.port_type = port->type;
	m.u.port_info_set.port_index = port->index;

	port_to_mmal_msg(port, &m.u.port_info_set.port);

	/* elementary stream format setup */
	m.u.port_info_set.format.type = port->format.type;
	m.u.port_info_set.format.encoding = port->format.encoding;
	m.u.port_info_set.format.encoding_variant =
	    port->format.encoding_variant;
	m.u.port_info_set.format.bitrate = port->format.bitrate;
	m.u.port_info_set.format.flags = port->format.flags;

	memcpy(&m.u.port_info_set.es, &port->es,
	       sizeof(union mmal_es_specific_format));

	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
	       port->format.extradata_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_set),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
		 port->component->handle, port->handle);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* use port info get message to retrieve port information */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	/* build the port info get message */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* create component on vc */
static int create_component(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_component *component,
			    const char *name)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	/* build component create message */
	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
	m.u.component_create.client_component = (u32)(unsigned long)component;
	strncpy(m.u.component_create.name, name,
		sizeof(m.u.component_create.name));

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_create),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_create_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	/* a valid component response received */
	component->handle = rmsg->u.component_create_reply.component_handle;
	component->inputs = rmsg->u.component_create_reply.input_num;
	component->outputs = rmsg->u.component_create_reply.output_num;
	component->clocks = rmsg->u.component_create_reply.clock_num;

	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
		 component->handle,
		 component->inputs, component->outputs, component->clocks);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* destroys a component on vc */
static int destroy_component(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_component *component)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
	m.u.component_destroy.component_handle = component->handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_destroy),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_destroy_reply.status;

release_msg:

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* enable a component on vc */
static int enable_component(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_component *component)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
	m.u.component_enable.component_handle = component->handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_enable),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_enable_reply.status;

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* disable a component on vc */
static int disable_component(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_component *component)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
	m.u.component_disable.component_handle = component->handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_disable),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_disable_reply.status;

release_msg:

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* get version of mmal implementation */
static int get_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_GET_VERSION;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.version),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	*major_out = rmsg->u.version.major;
	*minor_out = rmsg->u.version.minor;

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* do a port action with a port as a parameter */
static int port_action_port(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_port *port,
			    enum mmal_msg_port_action_type action_type)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
	m.u.port_action_port.component_handle = port->component->handle;
	m.u.port_action_port.port_handle = port->handle;
	m.u.port_action_port.action = action_type;

	port_to_mmal_msg(port, &m.u.port_action_port.port);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_action_port),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_action_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
		 __func__,
		 ret, port->component->handle, port->handle,
		 port_action_type_names[action_type], action_type);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

/* do a port action with handles as parameters */
static int port_action_handle(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      enum mmal_msg_port_action_type action_type,
			      u32 connect_component_handle,
			      u32 connect_port_handle)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;

	m.u.port_action_handle.component_handle = port->component->handle;
	m.u.port_action_handle.port_handle = port->handle;
	m.u.port_action_handle.action = action_type;

	m.u.port_action_handle.connect_component_handle =
	    connect_component_handle;
	m.u.port_action_handle.connect_port_handle = connect_port_handle;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_action_handle),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_action_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle,
		 port_action_type_names[action_type],
		 action_type, connect_component_handle, connect_port_handle);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}

static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&m.u.port_parameter_set.value, value, value_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
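
/* Sizing note: the parameter header is two u32s (id and size), so
 * m.u.port_parameter_set.size = 2 * sizeof(u32) + value_size, while the
 * payload length passed to send_synchronous_mmal_msg() also counts the
 * component and port handles, hence 4 * sizeof(u32) + value_size.
 */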

static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_get_reply.status;
	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
		*value_size = rmsg->u.port_parameter_get_reply.size;
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
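
/* Worked example: querying a parameter with *value_size == 4 when the
 * parameter is actually 8 bytes copies only 4 bytes into value but sets
 * *value_size to 8, so callers can detect truncation and retry with a
 * larger buffer.
 */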

/* disables a port and drains buffers from it */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/* drain all queued buffers on port */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb)
				port->buffer_cb(instance,
						port, 0, mmalbuf, 0, 0,
						MMAL_TIME_UNKNOWN,
						MMAL_TIME_UNKNOWN);
		}

		spin_unlock_irqrestore(&port->slock, flags);

		ret = port_info_get(instance, port);
	}

	return ret;
}

/* enable a port */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	/* ensure there are enough buffers queued to cover the buffer headers */
	if (port->buffer_cb) {
		hdr_count = 0;
		list_for_each(buf_head, &port->buffers) {
			hdr_count++;
		}
		if (hdr_count < port->current_buffer.num)
			return -ENOSPC;
	}

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		hdr_count = 1;
		list_for_each(buf_head, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	ret = port_info_get(instance, port);

done:
	return ret;
}
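
/* Note: when a buffer callback is registered, enabling requires at
 * least current_buffer.num buffers already queued on the port, and
 * exactly that many buffer headers are then advertised to the VPU.
 * Buffers stay on the port list and are only dequeued as data arrives.
 */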

/* ------------------------------------------------------------------
 * Exported API
 *------------------------------------------------------------------
 */

int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
			       struct vchiq_mmal_port *port)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_info_set(instance, port);
	if (ret)
		goto release_unlock;

	/* read what has actually been set */
	ret = port_info_get(instance, port);

release_unlock:
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}

int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_parameter_set(instance, port, parameter, value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}

int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_parameter_get(instance, port, parameter, value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}

/* enable a port
 *
 * enables a port and queues buffers for satisfying callbacks if we
 * provide a callback handler
 */
int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
			   struct vchiq_mmal_port *port,
			   vchiq_mmal_buffer_cb buffer_cb)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* already enabled - noop */
	if (port->enabled) {
		ret = 0;
		goto unlock;
	}

	port->buffer_cb = buffer_cb;

	ret = port_enable(instance, port);

unlock:
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}

int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_port *port)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (!port->enabled) {
		mutex_unlock(&instance->vchiq_mutex);
		return 0;
	}

	ret = port_disable(instance, port);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1673 
1674 /* ports will be connected in a tunneled manner so data buffers
1675  * are not handled by client.
1676  */
vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * src,struct vchiq_mmal_port * dst)1677 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1678 				   struct vchiq_mmal_port *src,
1679 				   struct vchiq_mmal_port *dst)
1680 {
1681 	int ret;
1682 
1683 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1684 		return -EINTR;
1685 
1686 	/* disconnect ports if connected */
1687 	if (src->connected) {
1688 		ret = port_disable(instance, src);
1689 		if (ret) {
1690 			pr_err("failed disabling src port(%d)\n", ret);
1691 			goto release_unlock;
1692 		}
1693 
1694 		/* do not need to disable the destination port as they
1695 		 * are connected and it is done automatically
1696 		 */
1697 
1698 		ret = port_action_handle(instance, src,
1699 					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1700 					 src->connected->component->handle,
1701 					 src->connected->handle);
1702 		if (ret < 0) {
1703 			pr_err("failed disconnecting src port\n");
1704 			goto release_unlock;
1705 		}
1706 		src->connected->enabled = false;
1707 		src->connected = NULL;
1708 	}
1709 
1710 	if (!dst) {
1711 		/* do not make new connection */
1712 		ret = 0;
1713 		pr_debug("not making new connection\n");
1714 		goto release_unlock;
1715 	}
1716 
1717 	/* copy src port format to dst */
1718 	dst->format.encoding = src->format.encoding;
1719 	dst->es.video.width = src->es.video.width;
1720 	dst->es.video.height = src->es.video.height;
1721 	dst->es.video.crop.x = src->es.video.crop.x;
1722 	dst->es.video.crop.y = src->es.video.crop.y;
1723 	dst->es.video.crop.width = src->es.video.crop.width;
1724 	dst->es.video.crop.height = src->es.video.crop.height;
1725 	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1726 	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1727 
1728 	/* set new format */
1729 	ret = port_info_set(instance, dst);
1730 	if (ret) {
1731 		pr_debug("setting port info failed\n");
1732 		goto release_unlock;
1733 	}
1734 
1735 	/* read what has actually been set */
1736 	ret = port_info_get(instance, dst);
1737 	if (ret) {
1738 		pr_debug("read back port info failed\n");
1739 		goto release_unlock;
1740 	}
1741 
1742 	/* connect two ports together */
1743 	ret = port_action_handle(instance, src,
1744 				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1745 				 dst->component->handle, dst->handle);
1746 	if (ret < 0) {
1747 		pr_debug("connecting port %d:%d to %d:%d failed\n",
1748 			 src->component->handle, src->handle,
1749 			 dst->component->handle, dst->handle);
1750 		goto release_unlock;
1751 	}
1752 	src->connected = dst;
1753 
1754 release_unlock:
1755 
1756 	mutex_unlock(&instance->vchiq_mutex);
1757 
1758 	return ret;
1759 }
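/*
 * Editor's example (not part of the original file): tunnelling one
 * component's output into another's input, e.g. a camera video port
 * into an encoder. The port indices are assumptions for illustration.
 * Passing a NULL destination tears an existing tunnel down instead.
 */
static int example_tunnel(struct vchiq_mmal_instance *instance,
			  struct vchiq_mmal_component *camera,
			  struct vchiq_mmal_component *encoder)
{
	/* the source format is copied to the destination and buffers
	 * then flow entirely on the VideoCore side
	 */
	return vchiq_mmal_port_connect_tunnel(instance, &camera->output[1],
					      &encoder->input[0]);
}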
1760 
1761 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1762 			     struct vchiq_mmal_port *port,
1763 			     struct mmal_buffer *buffer)
1764 {
1765 	unsigned long flags = 0;
1766 
1767 	spin_lock_irqsave(&port->slock, flags);
1768 	list_add_tail(&buffer->list, &port->buffers);
1769 	spin_unlock_irqrestore(&port->slock, flags);
1770 
1771 	/* if the port previously underflowed because no mmal_buffer was
1772 	 * available, the buffer just queued can now be submitted to the
1773 	 * mmal service (see the sketch after this function).
1774 	 */
1775 	if (port->buffer_underflow) {
1776 		port_buffer_from_host(instance, port);
1777 		port->buffer_underflow--;
1778 	}
1779 
1780 	return 0;
1781 }
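/*
 * Editor's example (not part of the original file): queueing a buffer on
 * an enabled port. The mmal_buffer is assumed to have been prepared by
 * the caller, as the videobuf2 glue in this driver does.
 */
static void example_queue_buffer(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_port *port,
				 struct mmal_buffer *buf)
{
	/* never fails; if the port had underflowed, the buffer is
	 * forwarded to the mmal service immediately
	 */
	vchiq_mmal_submit_buffer(instance, port, buf);
}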
1782 
1783 /* Initialise a mmal component and its ports
1784  * (see the usage sketch after this function)
1785  */
1786 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1787 			      const char *name,
1788 			      struct vchiq_mmal_component **component_out)
1789 {
1790 	int ret;
1791 	int idx;		/* port index */
1792 	struct vchiq_mmal_component *component;
1793 
1794 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1795 		return -EINTR;
1796 
1797 	if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
1798 		ret = -EINVAL;	/* TODO: is this the correct error? */
1799 		goto unlock;
1800 	}
1801 
1802 	component = &instance->component[instance->component_idx];
1803 
1804 	ret = create_component(instance, component, name);
1805 	if (ret < 0)
1806 		goto unlock;
1807 
1808 	/* ports info needs gathering */
1809 	component->control.type = MMAL_PORT_TYPE_CONTROL;
1810 	component->control.index = 0;
1811 	component->control.component = component;
1812 	spin_lock_init(&component->control.slock);
1813 	INIT_LIST_HEAD(&component->control.buffers);
1814 	ret = port_info_get(instance, &component->control);
1815 	if (ret < 0)
1816 		goto release_component;
1817 
1818 	for (idx = 0; idx < component->inputs; idx++) {
1819 		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1820 		component->input[idx].index = idx;
1821 		component->input[idx].component = component;
1822 		spin_lock_init(&component->input[idx].slock);
1823 		INIT_LIST_HEAD(&component->input[idx].buffers);
1824 		ret = port_info_get(instance, &component->input[idx]);
1825 		if (ret < 0)
1826 			goto release_component;
1827 	}
1828 
1829 	for (idx = 0; idx < component->outputs; idx++) {
1830 		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1831 		component->output[idx].index = idx;
1832 		component->output[idx].component = component;
1833 		spin_lock_init(&component->output[idx].slock);
1834 		INIT_LIST_HEAD(&component->output[idx].buffers);
1835 		ret = port_info_get(instance, &component->output[idx]);
1836 		if (ret < 0)
1837 			goto release_component;
1838 	}
1839 
1840 	for (idx = 0; idx < component->clocks; idx++) {
1841 		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1842 		component->clock[idx].index = idx;
1843 		component->clock[idx].component = component;
1844 		spin_lock_init(&component->clock[idx].slock);
1845 		INIT_LIST_HEAD(&component->clock[idx].buffers);
1846 		ret = port_info_get(instance, &component->clock[idx]);
1847 		if (ret < 0)
1848 			goto release_component;
1849 	}
1850 
1851 	instance->component_idx++;
1852 
1853 	*component_out = component;
1854 
1855 	mutex_unlock(&instance->vchiq_mutex);
1856 
1857 	return 0;
1858 
1859 release_component:
1860 	destroy_component(instance, component);
1861 unlock:
1862 	mutex_unlock(&instance->vchiq_mutex);
1863 
1864 	return ret;
1865 }
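/*
 * Editor's example (not part of the original file): creating a component
 * and handing it back to the caller. "ril.camera" is the component name
 * used elsewhere in this driver family; treat it as an assumption here.
 */
static int example_create_camera(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_component **camera_out)
{
	struct vchiq_mmal_component *camera;
	int ret;

	ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
	if (ret) {
		pr_err("failed to create camera component (%d)\n", ret);
		return ret;
	}

	/* control, input[], output[] and clock[] port info is now
	 * populated and ready for format negotiation
	 */
	*camera_out = camera;
	return 0;
}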
1866 
1867 /*
1868  * cause a mmal component to be destroyed
1869  */
1870 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1871 				  struct vchiq_mmal_component *component)
1872 {
1873 	int ret;
1874 
1875 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1876 		return -EINTR;
1877 
1878 	if (component->enabled)
1879 		disable_component(instance, component);	/* status superseded below */
1880 
1881 	ret = destroy_component(instance, component);
1882 
1883 	mutex_unlock(&instance->vchiq_mutex);
1884 
1885 	return ret;
1886 }
1887 
1888 /*
1889  * cause a mmal component to be enabled
1890  */
1891 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1892 				struct vchiq_mmal_component *component)
1893 {
1894 	int ret;
1895 
1896 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1897 		return -EINTR;
1898 
1899 	if (component->enabled) {
1900 		mutex_unlock(&instance->vchiq_mutex);
1901 		return 0;
1902 	}
1903 
1904 	ret = enable_component(instance, component);
1905 	if (ret == 0)
1906 		component->enabled = true;
1907 
1908 	mutex_unlock(&instance->vchiq_mutex);
1909 
1910 	return ret;
1911 }
1912 
1913 /*
1914  * cause a mmal component to be disabled
1915  */
1916 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1917 				 struct vchiq_mmal_component *component)
1918 {
1919 	int ret;
1920 
1921 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1922 		return -EINTR;
1923 
1924 	if (!component->enabled) {
1925 		mutex_unlock(&instance->vchiq_mutex);
1926 		return 0;
1927 	}
1928 
1929 	ret = disable_component(instance, component);
1930 	if (ret == 0)
1931 		component->enabled = false;
1932 
1933 	mutex_unlock(&instance->vchiq_mutex);
1934 
1935 	return ret;
1936 }
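/*
 * Editor's example (not part of the original file): enable/disable are
 * idempotent under the instance mutex, so a simple state helper is safe.
 */
static int example_set_component_state(struct vchiq_mmal_instance *instance,
				       struct vchiq_mmal_component *comp,
				       bool enable)
{
	return enable ? vchiq_mmal_component_enable(instance, comp) :
			vchiq_mmal_component_disable(instance, comp);
}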
1937 
1938 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1939 		       u32 *major_out, u32 *minor_out)
1940 {
1941 	int ret;
1942 
1943 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1944 		return -EINTR;
1945 
1946 	ret = get_version(instance, major_out, minor_out);
1947 
1948 	mutex_unlock(&instance->vchiq_mutex);
1949 
1950 	return ret;
1951 }
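/*
 * Editor's example (not part of the original file): querying the MMAL
 * protocol version the firmware reports, e.g. for a probe-time log.
 */
static void example_log_version(struct vchiq_mmal_instance *instance)
{
	u32 major = 0, minor = 0;

	if (vchiq_mmal_version(instance, &major, &minor) == 0)
		pr_info("mmal protocol version %u.%u\n", major, minor);
}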
1952 
1953 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1954 {
1955 	int status = 0;
1956 
1957 	if (!instance)
1958 		return -EINVAL;
1959 
1960 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1961 		return -EINTR;
1962 
1963 	vchi_service_use(instance->handle);
1964 
1965 	status = vchi_service_close(instance->handle);
1966 	if (status != 0)
1967 		pr_err("mmal-vchiq: VCHIQ close failed\n");
1968 
1969 	mutex_unlock(&instance->vchiq_mutex);
1970 
1971 	vfree(instance->bulk_scratch);
1972 
1973 	mmal_context_map_destroy(&instance->context_map);
1974 
1975 	kfree(instance);
1976 
1977 	return status;
1978 }
1979 
1980 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1981 {
1982 	int status;
1983 	struct vchiq_mmal_instance *instance;
1984 	static VCHI_CONNECTION_T *vchi_connection;
1985 	static VCHI_INSTANCE_T vchi_instance;
1986 	SERVICE_CREATION_T params = {
1987 		.version		= VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1988 		.service_id		= VC_MMAL_SERVER_NAME,
1989 		.connection		= vchi_connection,
1990 		.rx_fifo_size		= 0,
1991 		.tx_fifo_size		= 0,
1992 		.callback		= service_callback,
1993 		.callback_param		= NULL,
1994 		.want_unaligned_bulk_rx = 1,
1995 		.want_unaligned_bulk_tx = 1,
1996 		.want_crc		= 0
1997 	};
1998 
1999 	/* compile-time checks to ensure structure sizes, as these
2000 	 * structures are (de)serialised directly from memory.
2001 	 */
2002 
2003 	/* ensure the header structure has packed to the correct size */
2004 	BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2005 
2006 	/* ensure message structure does not exceed maximum length */
2007 	BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2008 
2009 	/* mmal port struct is correct size */
2010 	BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2011 
2012 	/* create a vchi instance */
2013 	status = vchi_initialise(&vchi_instance);
2014 	if (status) {
2015 		pr_err("Failed to initialise VCHI instance (status=%d)\n",
2016 		       status);
2017 		return -EIO;
2018 	}
2019 
2020 	status = vchi_connect(NULL, 0, vchi_instance);
2021 	if (status) {
2022 		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2023 		return -EIO;
2024 	}
2025 
2026 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2027 
2028 	if (!instance)
2029 		return -ENOMEM;
2030 
2031 	mutex_init(&instance->vchiq_mutex);
2032 	mutex_init(&instance->bulk_mutex);
2033 
2034 	instance->bulk_scratch = vmalloc(PAGE_SIZE);
	if (!instance->bulk_scratch) {
		kfree(instance);
		return -ENOMEM;
	}
2035 
2036 	status = mmal_context_map_init(&instance->context_map);
2037 	if (status) {
2038 		pr_err("Failed to init context map (status=%d)\n", status);
2039 		kfree(instance);
2040 		return status;
2041 	}
2042 
2043 	params.callback_param = instance;
2044 
2045 	status = vchi_service_open(vchi_instance, &params, &instance->handle);
2046 	if (status) {
2047 		pr_err("Failed to open VCHI service connection (status=%d)\n",
2048 		       status);
2049 		goto err_close_services;
2050 	}
2051 
2052 	vchi_service_release(instance->handle);
2053 
2054 	*out_instance = instance;
2055 
2056 	return 0;
2057 
2058 err_close_services:
2059 
2060 	vchi_service_close(instance->handle);
	mmal_context_map_destroy(&instance->context_map);
2061 	vfree(instance->bulk_scratch);
2062 	kfree(instance);
2063 	return -ENODEV;
2064 }
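/*
 * Editor's example (not part of the original file): the expected
 * init/finalise pairing for a client of this interface, roughly as a
 * probe/remove path would use it. Error handling is abbreviated.
 */
static int example_lifecycle(void)
{
	struct vchiq_mmal_instance *instance;
	int ret;

	ret = vchiq_mmal_init(&instance);
	if (ret)
		return ret;

	/* ... create components, connect ports, stream ... */

	return vchiq_mmal_finalise(instance);
}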
2065