• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <soc/bcm2835/raspberrypi-firmware.h>
29 
30 #include "vchiq_core.h"
31 #include "vchiq_ioctl.h"
32 #include "vchiq_arm.h"
33 #include "vchiq_debugfs.h"
34 
#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

/* Some per-instance constants */
#define MAX_COMPLETIONS 128	/* completion ring entries (power of two; indices are masked) */
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 128	/* per-service message ring entries (power of two; indices are masked) */

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER
49 
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;	/* general ARM-side driver logging */
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;	/* "susp" presumably suspend/resume logging — users outside this chunk */
53 
/*
 * Per-service state kept on behalf of a user-space client.  Stored in
 * vchiq_service->base.userdata and released by user_service_free() when
 * the underlying service is destroyed.
 */
struct user_service {
	struct vchiq_service *service;	/* underlying core service */
	void __user *userdata;		/* opaque cookie supplied by the client */
	struct vchiq_instance *instance; /* owning instance */
	char is_vchi;			/* nonzero: messages are buffered in msg_queue (see service_callback) */
	char dequeue_pending;		/* a blocking DEQUEUE_MESSAGE is waiting for a message */
	char close_pending;		/* CLOSED completion delivered, awaiting CLOSE_DELIVERED ack */
	int message_available_pos;	/* completion-queue position of the last MESSAGE_AVAILABLE */
	int msg_insert;			/* free-running insert index into msg_queue */
	int msg_remove;			/* free-running remove index into msg_queue */
	struct completion insert_event;	/* signalled after a message is inserted */
	struct completion remove_event;	/* signalled after a message is removed */
	struct completion close_event;	/* signalled once the close has been delivered */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];	/* ring; indices masked with MSG_QUEUE_SIZE-1 */
};
69 
/*
 * A bulk_waiter plus the bookkeeping needed to park an interrupted
 * blocking bulk transfer on instance->bulk_waiter_list so the same
 * process can resume it later (matched by pid).
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;		/* pid of the process that started the transfer */
	struct list_head list;	/* link in instance->bulk_waiter_list */
};
75 
/*
 * State for one client connection — either a kernel client created by
 * vchiq_initialise() or a user-space open of the character device.
 */
struct vchiq_instance {
	struct vchiq_state *state;	/* shared connection state with VideoCore */
	/* Completion ring delivered to the client.  Indices are free-running
	 * and masked with (MAX_COMPLETIONS - 1) when the ring is accessed. */
	struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct completion insert_event;	/* signalled when a completion is queued */
	struct completion remove_event;	/* signalled when a completion is consumed */
	struct mutex completion_mutex;

	int connected;		/* nonzero after a successful connect */
	int closing;		/* nonzero once shutdown of this instance has begun */
	int pid;		/* owning process id */
	int mark;		/* NOTE(review): no user visible in this chunk — confirm against full file */
	int use_close_delivered; /* client acknowledges CLOSED via CLOSE_DELIVERED */
	int trace;		/* per-instance message tracing flag */

	/* Outstanding blocking bulk transfers, looked up by pid. */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};
97 
/*
 * Cursor used when streaming a state dump into a user buffer.
 * NOTE(review): field semantics inferred from names — the dump helpers
 * that use this are outside this chunk; verify against them.
 */
struct dump_context {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* bytes produced so far */
	size_t space;		/* room remaining in buf */
	loff_t offset;		/* file offset of the next byte */
};
104 
/* Character device bookkeeping for /dev/vchiq. */
static struct cdev    vchiq_cdev;
static dev_t          vchiq_devid;
/* The single global connection state shared with the VideoCore. */
static struct vchiq_state g_state;
static struct class  *vchiq_class;
/* Protects every user_service's msg_insert/msg_remove/msg_queue. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
/* Child platform devices (registration code is outside this chunk). */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/* Per-SoC cache line sizes, used for bulk-transfer alignment. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};

/* Ioctl names indexed by ioctl number, for trace output.  Must stay in
 * step with the VCHIQ_IOC_* numbering — checked by the assert below. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));
144 
/* Forward declaration — defined below, after the bulk transmit/receive API. */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir);

/* Bounded number of 500-600us sleeps while waiting for the firmware. */
#define VCHIQ_INIT_RETRIES 10
/*
 * vchiq_initialise() - create a vchiq instance for a kernel-side client.
 * @instance_out: out: the newly allocated instance on success.
 *
 * Waits (bounded) for the shared VideoCore state to become available,
 * then allocates and initialises an instance bound to it.
 *
 * Return: 0 on success, -ENOTCONN if the VideoCore never came up,
 *	   -ENOMEM on allocation failure.
 */
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/* VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
			"%s: videocore initialized after %d retries\n",
			__func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
203 
/*
 * vchiq_shutdown() - tear down an instance created by vchiq_initialise().
 * @instance: the instance to shut down.
 *
 * Removes all of the instance's services (under the state mutex), and on
 * success frees any bulk waiters still parked on the instance before
 * freeing the instance itself.
 *
 * Return: result of vchiq_shutdown_internal(), or VCHIQ_RETRY if taking
 *	   the state mutex was interrupted by a fatal signal.
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		/* No one else can reach the instance now, so the list can be
		 * walked without bulk_waiter_list_mutex. */
		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
240 
/* Return nonzero once a connect has completed for this instance. */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
245 
/*
 * vchiq_connect() - complete the connection handshake with the VideoCore.
 * @instance: the instance to connect.
 *
 * Marks the instance connected on success, which switches newly added
 * services to the LISTENING state (see vchiq_add_service()).
 *
 * Return: result of vchiq_connect_internal(), or VCHIQ_RETRY if taking
 *	   the state mutex was interrupted by a fatal signal.
 */
enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level,
			"%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);
274 
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)275 static enum vchiq_status vchiq_add_service(
276 	struct vchiq_instance             *instance,
277 	const struct vchiq_service_params_kernel *params,
278 	unsigned int       *phandle)
279 {
280 	enum vchiq_status status;
281 	struct vchiq_state *state = instance->state;
282 	struct vchiq_service *service = NULL;
283 	int srvstate;
284 
285 	vchiq_log_trace(vchiq_core_log_level,
286 		"%s(%p) called", __func__, instance);
287 
288 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
289 
290 	srvstate = vchiq_is_connected(instance)
291 		? VCHIQ_SRVSTATE_LISTENING
292 		: VCHIQ_SRVSTATE_HIDDEN;
293 
294 	service = vchiq_add_service_internal(
295 		state,
296 		params,
297 		srvstate,
298 		instance,
299 		NULL);
300 
301 	if (service) {
302 		*phandle = service->handle;
303 		status = VCHIQ_SUCCESS;
304 	} else
305 		status = VCHIQ_ERROR;
306 
307 	vchiq_log_trace(vchiq_core_log_level,
308 		"%s(%p): returning %d", __func__, instance, status);
309 
310 	return status;
311 }
312 
/*
 * vchiq_open_service() - create a service and actively open it.
 * @instance: owning vchiq instance (must already be connected).
 * @params:   service parameters (fourcc, callback, userdata, versions).
 * @phandle:  out: handle of the opened service, left as
 *	      VCHIQ_SERVICE_HANDLE_INVALID on failure.
 *
 * Unlike vchiq_add_service(), this initiates the OPEN handshake with the
 * remote side; the service is removed again if the open fails.
 *
 * Return: VCHIQ_SUCCESS on success, VCHIQ_ERROR otherwise.
 */
enum vchiq_status vchiq_open_service(
	struct vchiq_instance             *instance,
	const struct vchiq_service_params_kernel *params,
	unsigned int       *phandle)
{
	enum vchiq_status   status = VCHIQ_ERROR;
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	/* Opening requires a live connection to the VideoCore. */
	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
		params,
		VCHIQ_SRVSTATE_OPENING,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			/* Open failed — undo the add and invalidate the handle. */
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
352 
/*
 * vchiq_bulk_transmit() - queue a bulk transmit towards the VideoCore.
 * @handle:   service handle.
 * @data:     source buffer (kernel pointer).
 * @size:     number of bytes to transfer.
 * @userdata: cookie returned in the completion (callback modes).
 * @mode:     NOCALLBACK / CALLBACK queue the transfer; BLOCKING also
 *	      waits for it to complete.
 *
 * Retries (with a 1 ms sleep) while the core reports VCHIQ_RETRY, so on
 * return the transfer is at least queued.
 *
 * Return: core status; VCHIQ_ERROR for an unknown @mode.
 */
enum vchiq_status
vchiq_bulk_transmit(unsigned int handle, const void *data,
	unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	enum vchiq_status status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			/* Cast drops const: the core API takes a non-const
			 * kernel pointer for both directions. */
			status = vchiq_bulk_transfer(handle,
						     (void *)data, NULL,
						     size, userdata, mode,
						     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(handle,
				(void *)data, size, VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return VCHIQ_ERROR;
		}

		/*
		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != VCHIQ_RETRY)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
390 
vchiq_bulk_receive(unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)391 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
392 				     unsigned int size, void *userdata,
393 				     enum vchiq_bulk_mode mode)
394 {
395 	enum vchiq_status status;
396 
397 	while (1) {
398 		switch (mode) {
399 		case VCHIQ_BULK_MODE_NOCALLBACK:
400 		case VCHIQ_BULK_MODE_CALLBACK:
401 			status = vchiq_bulk_transfer(handle, data, NULL,
402 						     size, userdata,
403 						     mode, VCHIQ_BULK_RECEIVE);
404 			break;
405 		case VCHIQ_BULK_MODE_BLOCKING:
406 			status = vchiq_blocking_bulk_transfer(handle,
407 				(void *)data, size, VCHIQ_BULK_RECEIVE);
408 			break;
409 		default:
410 			return VCHIQ_ERROR;
411 		}
412 
413 		/*
414 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
415 		 * to implement a retry mechanism since this function is
416 		 * supposed to block until queued
417 		 */
418 		if (status != VCHIQ_RETRY)
419 			break;
420 
421 		msleep(1);
422 	}
423 
424 	return status;
425 }
426 EXPORT_SYMBOL(vchiq_bulk_receive);
427 
/*
 * vchiq_blocking_bulk_transfer() - run a bulk transfer and wait for it.
 * @handle: service handle.
 * @data:   kernel buffer to transfer.
 * @size:   number of bytes.
 * @dir:    VCHIQ_BULK_TRANSMIT or VCHIQ_BULK_RECEIVE.
 *
 * If this process was previously interrupted mid-transfer, its parked
 * bulk_waiter_node (keyed by pid on instance->bulk_waiter_list) is
 * reclaimed and the transfer resumed; otherwise a fresh waiter is
 * allocated.  If the wait is interrupted again by a fatal signal the
 * waiter is re-parked on the list for a later retry.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	/* Look for a waiter left behind by an earlier interrupted attempt. */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
				(bulk->size != size)) {
				/* This is not a retry of the previous one.
				 * Cancel the signal when the transfer
				 * completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			 * completes.
			 */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted by a non-fatal signal: park the waiter so the
		 * same process can pick the transfer up again. */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);
	}

	return status;
}
/****************************************************************************
*
*   add_completion
*
***************************************************************************/

/* Queue one completion record on @instance's completion ring for delivery
 * to the client (consumed by AWAIT_COMPLETION).  Blocks (interruptibly)
 * while the ring is full.  @header may be NULL (e.g. for an extra
 * MESSAGE_AVAILABLE marker).
 *
 * Returns VCHIQ_RETRY if interrupted while waiting for space, otherwise
 * VCHIQ_SUCCESS (including when the instance is closing).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Indices are free-running; mask to address the ring. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
576 
/****************************************************************************
*
*   service_callback
*
***************************************************************************/

/* Core callback for services created on behalf of user-space clients.
 * For "vchi" services, message headers are first buffered in the
 * per-service msg_queue ring (blocking interruptibly when it is full);
 * everything else — and a MESSAGE_AVAILABLE marker when needed — goes
 * through the instance completion queue via add_completion().
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted while waiting, or
 * VCHIQ_ERROR if the instance started closing during the wait.
 */
static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Ring full: drop the lock, make sure the client will be
		 * notified, and wait for it to remove a message. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header is now owned by msg_queue; don't also pass it
		 * to the completion queue. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
689 
/****************************************************************************
*
*   user_service_free
*
***************************************************************************/

/* userdata_term callback for services created by vchiq_ioc_create_service():
 * frees the user_service allocated there when the service is destroyed. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
700 
/****************************************************************************
*
*   close_delivered
*
***************************************************************************/

/* Called when the client acknowledges a CLOSED completion: drops the extra
 * service reference taken in add_completion() and wakes any thread blocked
 * in close/remove waiting for the acknowledgement. */
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"%s(handle=%x)",
		__func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
722 
/* Iteration state for vchiq_ioc_copy_element_data(): walks an array of
 * user-space vchiq_elements, remembering how far into the current one
 * the copy has progressed. */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element */
	size_t element_offset;		/* bytes of *element already consumed */
	unsigned long elements_to_go;	/* elements remaining, including current */
};
728 
vchiq_ioc_copy_element_data(void * context,void * dest,size_t offset,size_t maxsize)729 static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
730 					   size_t offset, size_t maxsize)
731 {
732 	struct vchiq_io_copy_callback_context *cc = context;
733 	size_t total_bytes_copied = 0;
734 	size_t bytes_this_round;
735 
736 	while (total_bytes_copied < maxsize) {
737 		if (!cc->elements_to_go)
738 			return total_bytes_copied;
739 
740 		if (!cc->element->size) {
741 			cc->elements_to_go--;
742 			cc->element++;
743 			cc->element_offset = 0;
744 			continue;
745 		}
746 
747 		bytes_this_round = min(cc->element->size - cc->element_offset,
748 				       maxsize - total_bytes_copied);
749 
750 		if (copy_from_user(dest + total_bytes_copied,
751 				  cc->element->data + cc->element_offset,
752 				  bytes_this_round))
753 			return -EFAULT;
754 
755 		cc->element_offset += bytes_this_round;
756 		total_bytes_copied += bytes_this_round;
757 
758 		if (cc->element_offset == cc->element->size) {
759 			cc->elements_to_go--;
760 			cc->element++;
761 			cc->element_offset = 0;
762 		}
763 	}
764 
765 	return maxsize;
766 }
767 
/**************************************************************************
 *
 *   vchiq_ioc_queue_message
 *
 **************************************************************************/

/* Queue a message assembled from an array of user-space elements.
 * Validates that every non-empty element has a data pointer, then hands
 * the combined size plus a copy-callback cursor to the core.
 *
 * Returns 0 on success, -EFAULT for a bad element, -EIO on core error,
 * -EINTR if the core asked for a retry. */
static int
vchiq_ioc_queue_message(unsigned int handle,
			struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	enum vchiq_status status = VCHIQ_SUCCESS;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				     &context, total_size);

	/* Translate core status codes to errnos for the ioctl caller. */
	if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
803 
/* Handle VCHIQ_IOC_CREATE_SERVICE: create (and optionally open) a service
 * for a user-space client, wiring service_callback() and a freshly
 * allocated user_service as its userdata.
 *
 * On success args->handle is filled in.  The user_service's lifetime is
 * tied to the core service via user_service_free(), so it is only freed
 * directly on the paths where the service was never created.
 *
 * Returns 0, -ENOMEM, -ENOTCONN (open requested before connect),
 * -EEXIST (core refused the service), -EINTR or -EIO (open failed). */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service_params_kernel params;
	int srvstate;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		if (!instance->connected) {
			kfree(user_service);
			return -ENOTCONN;
		}
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc   = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version  = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	/* Start one behind so the first MESSAGE_AVAILABLE is not skipped. */
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status != VCHIQ_SUCCESS) {
			/* Removing the service also frees user_service via
			 * the user_service_free terminator. */
			vchiq_remove_service(service->handle);
			return (status == VCHIQ_RETRY) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}
868 
/* Handle VCHIQ_IOC_DEQUEUE_MESSAGE: pop the next message header from a
 * vchi service's msg_queue ring (optionally blocking until one arrives)
 * and copy its payload out to args->buf.
 *
 * Returns the message size on success, -EINVAL (bad handle or non-vchi
 * service), -EWOULDBLOCK (empty and non-blocking), -EINTR, -ENOTCONN
 * (NULL header queued), -EFAULT, or -EMSGSIZE (caller buffer too small). */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local)
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		/* Let service_callback() wake us directly instead of going
		 * through the completion queue. */
		user_service->dequeue_pending = 1;
		ret = 0;
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(
				&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove ==
			user_service->msg_insert);

		if (ret)
			goto out;
	}

	/* Free-running indices: insert must never lag remove. */
	BUG_ON((int)(user_service->msg_insert -
		user_service->msg_remove) < 0);

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf,
					header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(service->handle, header);
		} else
			ret = -EFAULT;
	} else {
		vchiq_log_error(vchiq_arm_log_level,
			"header %pK: bufsize %x < size %x",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	unlock_service(service);
	return ret;
}
948 
/* Handle the QUEUE_BULK_TRANSMIT / QUEUE_BULK_RECEIVE ioctls.
 *
 * BLOCKING mode allocates a bulk_waiter; WAITING mode instead reclaims a
 * waiter previously parked for this pid (an interrupted BLOCKING call).
 * If the wait is interrupted again, the waiter is re-parked and the mode
 * VCHIQ_BULK_MODE_WAITING is written back through @mode so user space
 * knows to retry with WAITING.
 *
 * Returns 0, -EINVAL (bad handle), -ENOMEM, -ESRCH (no parked waiter),
 * -EIO, -EINTR, or a put_user() fault. */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node),
			GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Resume: find the waiter parked by an earlier call. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
				"no bulk_waiter found for pid %d",
				current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			"found bulk_waiter %pK for pid %d", waiter,
			current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		userdata = args->userdata;
	}

	/*
	 * FIXME address space mismatch:
	 * args->data may be interpreted as a kernel pointer
	 * in create_pagelist() called from vchiq_bulk_transfer(),
	 * accessing kernel data instead of user space, based on the
	 * address.
	 */
	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer
			** completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		/* Interrupted: park the waiter and tell user space to come
		 * back with VCHIQ_BULK_MODE_WAITING. */
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %pK for pid %d",
			waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	unlock_service(service);
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
1049 
1050 /* read a user pointer value from an array pointers in user space */
vchiq_get_user_ptr(void __user ** buf,void __user * ubuf,int index)1051 static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
1052 {
1053 	int ret;
1054 
1055 	if (in_compat_syscall()) {
1056 		compat_uptr_t ptr32;
1057 		compat_uptr_t __user *uptr = ubuf;
1058 		ret = get_user(ptr32, uptr + index);
1059 		*buf = compat_ptr(ptr32);
1060 	} else {
1061 		uintptr_t ptr, __user *uptr = ubuf;
1062 		ret = get_user(ptr, uptr + index);
1063 		*buf = (void __user *)ptr;
1064 	}
1065 
1066 	return ret;
1067 }
1068 
/*
 * 32-bit (compat) layout of struct vchiq_completion_data: pointer
 * members are carried as compat_uptr_t so the structure matches the
 * layout seen by a 32-bit user-space process.
 */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
1075 
/*
 * Write one completion record into the user-supplied array at the
 * given index, converting to the 32-bit layout for compat callers.
 * Returns 0 on success or -EFAULT if the copy to user space fails.
 */
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
	struct vchiq_completion_data32 tmp;

	/* Native tasks get the record verbatim. */
	if (!in_compat_syscall()) {
		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
			return -EFAULT;
		return 0;
	}

	/* Compat task: repack pointers into 32-bit fields first. */
	tmp = (struct vchiq_completion_data32) {
		.reason		  = completion->reason,
		.header		  = ptr_to_compat(completion->header),
		.service_userdata = ptr_to_compat(completion->service_userdata),
		.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
	};

	if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
		return -EFAULT;

	return 0;
}
1098 
/*
 * Handler for VCHIQ_IOC_AWAIT_COMPLETION: copy up to args->count
 * pending completion records from the instance's completion ring to
 * the user buffer args->buf, sleeping (interruptibly) until at least
 * one record is available or the instance is closing.  Message
 * payloads are copied into user-supplied buffers taken from the end
 * of the args->msgbufs array; the count of buffers still unused is
 * written back through msgbufcountp.
 *
 * Returns the number of completions copied (possibly 0 when the
 * instance is closing), or a negative errno if nothing was copied.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected) {
		return -ENOTCONN;
	}

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/*
	 * Ring empty: drop the mutex while sleeping on insert_event so
	 * the producer can make progress, then re-check under the lock.
	 */
	while ((instance->completion_remove ==
		instance->completion_insert)
		&& !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(
					&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				"AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions copied so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[
			remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		/*
		 * Zero first so any padding bytes are defined before
		 * the structure is copied out to user space.
		 */
		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
					"header %pK: msgbufsize %x < msglen %x",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a
				** buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
						msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message
			** can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the
			** msgbuf. */
			user_completion.header = msgbuf;
		}

		/*
		 * Without close-delivered support the service ref taken
		 * for the CLOSED completion is dropped here; otherwise
		 * it is held until the CLOSE_DELIVERED ioctl.
		 */
		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			unlock_service(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report back how many message buffers remain unused. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	/* Wake the producer in case the ring was full. */
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
1250 
/****************************************************************************
*
*   vchiq_ioctl
*
*   Native ioctl entry point for /dev/vchiq.  Dispatches on cmd,
*   translating the internal enum vchiq_status into an errno on the
*   way out (VCHIQ_ERROR -> -EIO, VCHIQ_RETRY -> -EINTR).
*
***************************************************************************/
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - instance %pK, cmd %s, arg %lx",
		__func__, instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		/* Tear down every service on this instance, then ask the
		   completion thread to exit. */
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i))) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		/* One-shot connect of this instance to the VCHIQ state. */
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		/* Create a service and write the new handle back to the
		   caller's structure; undo on writeback failure. */
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/* close_pending is false on first entry, and when the
		   wait in vchiq_close_service has been interrupted. */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/* close_pending is true once the underlying service
		   has been closed until the client library calls the
		   CLOSE_DELIVERED ioctl, signalling close_event. */
		if (user_service->close_pending &&
			wait_for_completion_interruptible(
				&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE:	{
		/* Adjust the service's use count for power management. */
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE)	?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		/* Queue a scatter-gather message of at most MAX_ELEMENTS
		   elements, copied into kernel space first. */
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
				args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		/* Bulk DMA transfer in either direction; the helper may
		   write the effective mode back through &argp->mode. */
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		/* Copy out at most config_size bytes of the driver
		   configuration (must not exceed the kernel struct). */
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		/* Record the user library's version; newer libraries opt
		   in to explicit close-delivered handshaking. */
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		/* User space acknowledges a CLOSED completion, releasing
		   the service reference held for it. */
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	/* Drop the reference taken by the find_service_* helpers. */
	if (service)
		unlock_service(service);

	/* Map internal status codes to errnos when no errno is set yet. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}
1573 
1574 #if defined(CONFIG_COMPAT)
1575 
/* 32-bit (compat) layout of struct vchiq_service_params */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};
1583 
/* 32-bit (compat) layout of struct vchiq_create_service */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1593 
1594 static long
vchiq_compat_ioctl_create_service(struct file * file,unsigned int cmd,struct vchiq_create_service32 __user * ptrargs32)1595 vchiq_compat_ioctl_create_service(
1596 	struct file *file,
1597 	unsigned int cmd,
1598 	struct vchiq_create_service32 __user *ptrargs32)
1599 {
1600 	struct vchiq_create_service args;
1601 	struct vchiq_create_service32 args32;
1602 	long ret;
1603 
1604 	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1605 		return -EFAULT;
1606 
1607 	args = (struct vchiq_create_service) {
1608 		.params = {
1609 			.fourcc	     = args32.params.fourcc,
1610 			.callback    = compat_ptr(args32.params.callback),
1611 			.userdata    = compat_ptr(args32.params.userdata),
1612 			.version     = args32.params.version,
1613 			.version_min = args32.params.version_min,
1614 		},
1615 		.is_open = args32.is_open,
1616 		.is_vchi = args32.is_vchi,
1617 		.handle  = args32.handle,
1618 	};
1619 
1620 	ret = vchiq_ioc_create_service(file->private_data, &args);
1621 	if (ret < 0)
1622 		return ret;
1623 
1624 	if (put_user(args.handle, &ptrargs32->handle)) {
1625 		vchiq_remove_service(args.handle);
1626 		return -EFAULT;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
/* 32-bit (compat) layout of struct vchiq_element */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};
1636 
/* 32-bit (compat) layout of struct vchiq_queue_message */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1645 
1646 static long
vchiq_compat_ioctl_queue_message(struct file * file,unsigned int cmd,struct vchiq_queue_message32 __user * arg)1647 vchiq_compat_ioctl_queue_message(struct file *file,
1648 				 unsigned int cmd,
1649 				 struct vchiq_queue_message32 __user *arg)
1650 {
1651 	struct vchiq_queue_message args;
1652 	struct vchiq_queue_message32 args32;
1653 	struct vchiq_service *service;
1654 	int ret;
1655 
1656 	if (copy_from_user(&args32, arg, sizeof(args32)))
1657 		return -EFAULT;
1658 
1659 	args = (struct vchiq_queue_message) {
1660 		.handle   = args32.handle,
1661 		.count    = args32.count,
1662 		.elements = compat_ptr(args32.elements),
1663 	};
1664 
1665 	if (args32.count > MAX_ELEMENTS)
1666 		return -EINVAL;
1667 
1668 	service = find_service_for_instance(file->private_data, args.handle);
1669 	if (!service)
1670 		return -EINVAL;
1671 
1672 	if (args32.elements && args32.count) {
1673 		struct vchiq_element32 element32[MAX_ELEMENTS];
1674 		struct vchiq_element elements[MAX_ELEMENTS];
1675 		unsigned int count;
1676 
1677 		if (copy_from_user(&element32, args.elements,
1678 				   sizeof(element32))) {
1679 			unlock_service(service);
1680 			return -EFAULT;
1681 		}
1682 
1683 		for (count = 0; count < args32.count; count++) {
1684 			elements[count].data =
1685 				compat_ptr(element32[count].data);
1686 			elements[count].size = element32[count].size;
1687 		}
1688 		ret = vchiq_ioc_queue_message(args.handle, elements,
1689 					      args.count);
1690 	} else {
1691 		ret = -EINVAL;
1692 	}
1693 	unlock_service(service);
1694 
1695 	return ret;
1696 }
1697 
/* 32-bit (compat) layout of struct vchiq_queue_bulk_transfer */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1710 
1711 static long
vchiq_compat_ioctl_queue_bulk(struct file * file,unsigned int cmd,struct vchiq_queue_bulk_transfer32 __user * argp)1712 vchiq_compat_ioctl_queue_bulk(struct file *file,
1713 			      unsigned int cmd,
1714 			      struct vchiq_queue_bulk_transfer32 __user *argp)
1715 {
1716 	struct vchiq_queue_bulk_transfer32 args32;
1717 	struct vchiq_queue_bulk_transfer args;
1718 	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1719 				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1720 
1721 	if (copy_from_user(&args32, argp, sizeof(args32)))
1722 		return -EFAULT;
1723 
1724 	args = (struct vchiq_queue_bulk_transfer) {
1725 		.handle   = args32.handle,
1726 		.data	  = compat_ptr(args32.data),
1727 		.size	  = args32.size,
1728 		.userdata = compat_ptr(args32.userdata),
1729 		.mode	  = args32.mode,
1730 	};
1731 
1732 	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1733 					  dir, &argp->mode);
1734 }
1735 
/* 32-bit (compat) layout of struct vchiq_await_completion */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1746 
1747 static long
vchiq_compat_ioctl_await_completion(struct file * file,unsigned int cmd,struct vchiq_await_completion32 __user * argp)1748 vchiq_compat_ioctl_await_completion(struct file *file,
1749 				    unsigned int cmd,
1750 				    struct vchiq_await_completion32 __user *argp)
1751 {
1752 	struct vchiq_await_completion args;
1753 	struct vchiq_await_completion32 args32;
1754 
1755 	if (copy_from_user(&args32, argp, sizeof(args32)))
1756 		return -EFAULT;
1757 
1758 	args = (struct vchiq_await_completion) {
1759 		.count		= args32.count,
1760 		.buf		= compat_ptr(args32.buf),
1761 		.msgbufsize	= args32.msgbufsize,
1762 		.msgbufcount	= args32.msgbufcount,
1763 		.msgbufs	= compat_ptr(args32.msgbufs),
1764 	};
1765 
1766 	return vchiq_ioc_await_completion(file->private_data, &args,
1767 					  &argp->msgbufcount);
1768 }
1769 
/* 32-bit (compat) layout of struct vchiq_dequeue_message */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1779 
1780 static long
vchiq_compat_ioctl_dequeue_message(struct file * file,unsigned int cmd,struct vchiq_dequeue_message32 __user * arg)1781 vchiq_compat_ioctl_dequeue_message(struct file *file,
1782 				   unsigned int cmd,
1783 				   struct vchiq_dequeue_message32 __user *arg)
1784 {
1785 	struct vchiq_dequeue_message32 args32;
1786 	struct vchiq_dequeue_message args;
1787 
1788 	if (copy_from_user(&args32, arg, sizeof(args32)))
1789 		return -EFAULT;
1790 
1791 	args = (struct vchiq_dequeue_message) {
1792 		.handle		= args32.handle,
1793 		.blocking	= args32.blocking,
1794 		.bufsize	= args32.bufsize,
1795 		.buf		= compat_ptr(args32.buf),
1796 	};
1797 
1798 	return vchiq_ioc_dequeue_message(file->private_data, &args);
1799 }
1800 
/* 32-bit (compat) layout of struct vchiq_get_config */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1808 
1809 static long
vchiq_compat_ioctl_get_config(struct file * file,unsigned int cmd,struct vchiq_get_config32 __user * arg)1810 vchiq_compat_ioctl_get_config(struct file *file,
1811 			      unsigned int cmd,
1812 			      struct vchiq_get_config32 __user *arg)
1813 {
1814 	struct vchiq_get_config32 args32;
1815 	struct vchiq_config config;
1816 	void __user *ptr;
1817 
1818 	if (copy_from_user(&args32, arg, sizeof(args32)))
1819 		return -EFAULT;
1820 	if (args32.config_size > sizeof(config))
1821 		return -EINVAL;
1822 
1823 	vchiq_get_config(&config);
1824 	ptr = compat_ptr(args32.pconfig);
1825 	if (copy_to_user(ptr, &config, args32.config_size))
1826 		return -EFAULT;
1827 
1828 	return 0;
1829 }
1830 
1831 static long
vchiq_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1832 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1833 {
1834 	void __user *argp = compat_ptr(arg);
1835 	switch (cmd) {
1836 	case VCHIQ_IOC_CREATE_SERVICE32:
1837 		return vchiq_compat_ioctl_create_service(file, cmd, argp);
1838 	case VCHIQ_IOC_QUEUE_MESSAGE32:
1839 		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
1840 	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1841 	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1842 		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
1843 	case VCHIQ_IOC_AWAIT_COMPLETION32:
1844 		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
1845 	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1846 		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
1847 	case VCHIQ_IOC_GET_CONFIG32:
1848 		return vchiq_compat_ioctl_get_config(file, cmd, argp);
1849 	default:
1850 		return vchiq_ioctl(file, cmd, (unsigned long)argp);
1851 	}
1852 }
1853 
1854 #endif
1855 
vchiq_open(struct inode * inode,struct file * file)1856 static int vchiq_open(struct inode *inode, struct file *file)
1857 {
1858 	struct vchiq_state *state = vchiq_get_state();
1859 	struct vchiq_instance *instance;
1860 
1861 	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1862 
1863 	if (!state) {
1864 		vchiq_log_error(vchiq_arm_log_level,
1865 				"vchiq has no connection to VideoCore");
1866 		return -ENOTCONN;
1867 	}
1868 
1869 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1870 	if (!instance)
1871 		return -ENOMEM;
1872 
1873 	instance->state = state;
1874 	instance->pid = current->tgid;
1875 
1876 	vchiq_debugfs_add_instance(instance);
1877 
1878 	init_completion(&instance->insert_event);
1879 	init_completion(&instance->remove_event);
1880 	mutex_init(&instance->completion_mutex);
1881 	mutex_init(&instance->bulk_waiter_list_mutex);
1882 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
1883 
1884 	file->private_data = instance;
1885 
1886 	return 0;
1887 }
1888 
/*
 * Release handler for /dev/vchiq: shut down the completion thread,
 * terminate and drain every service owned by this instance, flush
 * leftover completion records and bulk waiters, and free the
 * instance.  The teardown order below is deliberate: services must
 * be terminated before their message queues are drained.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		unlock_service(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

		spin_lock(&msg_queue_spinlock);

		/*
		 * Release every message still queued for this service,
		 * dropping the spinlock around each release call.
		 */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		unlock_service(service);
	}

	/* Release any closed services */
	while (instance->completion_remove !=
		instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			unlock_service(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	/* Free any bulk waiters left behind by BLOCKING transfers. */
	{
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				"bulk_waiter - cleaned up %pK for pid %d",
				waiter, waiter->pid);
			kfree(waiter);
		}
	}

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
2005 
/****************************************************************************
*
*   vchiq_dump
*
*   Append 'len' bytes of 'str' to the user buffer described by
*   'dump_context' (a struct dump_context), honouring its offset/
*   space/actual window.  A trailing NUL within 'len' is rewritten
*   as '\n' so each dumped record ends a line.  Returns 0 on
*   success or -EFAULT if the copy to user space fails.
*
***************************************************************************/

int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* User buffer already full. */
	if (context->actual >= context->space)
		return 0;

	/* Skip bytes until the caller's requested file offset. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
2052 
2053 /****************************************************************************
2054 *
2055 *   vchiq_dump_platform_instance_state
2056 *
2057 ***************************************************************************/
2058 
/*
 * Dump a one-line summary (pid, connection state, completion-queue depth)
 * for every distinct userspace instance.  Returns 0 on success or the
 * error from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/* There is no list of instances, so instead scan all services,
		marking those that have been dumped. */

	/* Pass 1: clear the per-instance mark on every userspace service. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		/* Only services created through the char device count. */
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Pass 2: dump each instance once, setting mark to deduplicate. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * NOTE(review): instance is dereferenced below after
		 * rcu_read_unlock(); this assumes the instance outlives the
		 * RCU section (e.g. pinned by the open file) - confirm.
		 */
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		/* len + 1 includes the NUL so vchiq_dump emits a newline. */
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
2118 
2119 /****************************************************************************
2120 *
2121 *   vchiq_dump_platform_service_state
2122 *
2123 ***************************************************************************/
2124 
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)2125 int vchiq_dump_platform_service_state(void *dump_context,
2126 				      struct vchiq_service *service)
2127 {
2128 	struct user_service *user_service =
2129 			(struct user_service *)service->base.userdata;
2130 	char buf[80];
2131 	int len;
2132 
2133 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
2134 
2135 	if ((service->base.callback == service_callback) &&
2136 		user_service->is_vchi) {
2137 		len += scnprintf(buf + len, sizeof(buf) - len,
2138 			", %d/%d messages",
2139 			user_service->msg_insert - user_service->msg_remove,
2140 			MSG_QUEUE_SIZE);
2141 
2142 		if (user_service->dequeue_pending)
2143 			len += scnprintf(buf + len, sizeof(buf) - len,
2144 				" (dequeue pending)");
2145 	}
2146 
2147 	return vchiq_dump(dump_context, buf, len + 1);
2148 }
2149 
2150 /****************************************************************************
2151 *
2152 *   vchiq_read
2153 *
2154 ***************************************************************************/
2155 
2156 static ssize_t
vchiq_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)2157 vchiq_read(struct file *file, char __user *buf,
2158 	size_t count, loff_t *ppos)
2159 {
2160 	struct dump_context context;
2161 	int err;
2162 
2163 	context.buf = buf;
2164 	context.actual = 0;
2165 	context.space = count;
2166 	context.offset = *ppos;
2167 
2168 	err = vchiq_dump_state(&context, &g_state);
2169 	if (err)
2170 		return err;
2171 
2172 	*ppos += context.actual;
2173 
2174 	return context.actual;
2175 }
2176 
2177 struct vchiq_state *
vchiq_get_state(void)2178 vchiq_get_state(void)
2179 {
2180 
2181 	if (!g_state.remote)
2182 		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2183 	else if (g_state.remote->initialised != 1)
2184 		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2185 			__func__, g_state.remote->initialised);
2186 
2187 	return (g_state.remote &&
2188 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
2189 }
2190 
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2202 
2203 /*
2204  * Autosuspend related functionality
2205  */
2206 
/*
 * Service callback for the keepalive ("KEEP") service.  No events are
 * expected on this service, so any callback is logged as an error and
 * otherwise ignored.
 */
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
	struct vchiq_header *header,
	unsigned int service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}
2217 
2218 static int
vchiq_keepalive_thread_func(void * v)2219 vchiq_keepalive_thread_func(void *v)
2220 {
2221 	struct vchiq_state *state = (struct vchiq_state *)v;
2222 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2223 
2224 	enum vchiq_status status;
2225 	struct vchiq_instance *instance;
2226 	unsigned int ka_handle;
2227 	int ret;
2228 
2229 	struct vchiq_service_params_kernel params = {
2230 		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2231 		.callback    = vchiq_keepalive_vchiq_callback,
2232 		.version     = KEEPALIVE_VER,
2233 		.version_min = KEEPALIVE_VER_MIN
2234 	};
2235 
2236 	ret = vchiq_initialise(&instance);
2237 	if (ret) {
2238 		vchiq_log_error(vchiq_susp_log_level,
2239 			"%s vchiq_initialise failed %d", __func__, ret);
2240 		goto exit;
2241 	}
2242 
2243 	status = vchiq_connect(instance);
2244 	if (status != VCHIQ_SUCCESS) {
2245 		vchiq_log_error(vchiq_susp_log_level,
2246 			"%s vchiq_connect failed %d", __func__, status);
2247 		goto shutdown;
2248 	}
2249 
2250 	status = vchiq_add_service(instance, &params, &ka_handle);
2251 	if (status != VCHIQ_SUCCESS) {
2252 		vchiq_log_error(vchiq_susp_log_level,
2253 			"%s vchiq_open_service failed %d", __func__, status);
2254 		goto shutdown;
2255 	}
2256 
2257 	while (1) {
2258 		long rc = 0, uc = 0;
2259 
2260 		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
2261 			vchiq_log_error(vchiq_susp_log_level,
2262 				"%s interrupted", __func__);
2263 			flush_signals(current);
2264 			continue;
2265 		}
2266 
2267 		/* read and clear counters.  Do release_count then use_count to
2268 		 * prevent getting more releases than uses */
2269 		rc = atomic_xchg(&arm_state->ka_release_count, 0);
2270 		uc = atomic_xchg(&arm_state->ka_use_count, 0);
2271 
2272 		/* Call use/release service the requisite number of times.
2273 		 * Process use before release so use counts don't go negative */
2274 		while (uc--) {
2275 			atomic_inc(&arm_state->ka_use_ack_count);
2276 			status = vchiq_use_service(ka_handle);
2277 			if (status != VCHIQ_SUCCESS) {
2278 				vchiq_log_error(vchiq_susp_log_level,
2279 					"%s vchiq_use_service error %d",
2280 					__func__, status);
2281 			}
2282 		}
2283 		while (rc--) {
2284 			status = vchiq_release_service(ka_handle);
2285 			if (status != VCHIQ_SUCCESS) {
2286 				vchiq_log_error(vchiq_susp_log_level,
2287 					"%s vchiq_release_service error %d",
2288 					__func__, status);
2289 			}
2290 		}
2291 	}
2292 
2293 shutdown:
2294 	vchiq_shutdown(instance);
2295 exit:
2296 	return 0;
2297 }
2298 
2299 enum vchiq_status
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)2300 vchiq_arm_init_state(struct vchiq_state *state,
2301 		     struct vchiq_arm_state *arm_state)
2302 {
2303 	if (arm_state) {
2304 		rwlock_init(&arm_state->susp_res_lock);
2305 
2306 		init_completion(&arm_state->ka_evt);
2307 		atomic_set(&arm_state->ka_use_count, 0);
2308 		atomic_set(&arm_state->ka_use_ack_count, 0);
2309 		atomic_set(&arm_state->ka_release_count, 0);
2310 
2311 		arm_state->state = state;
2312 		arm_state->first_connect = 0;
2313 
2314 	}
2315 	return VCHIQ_SUCCESS;
2316 }
2317 
2318 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)2319 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2320 		   enum USE_TYPE_E use_type)
2321 {
2322 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2323 	enum vchiq_status ret = VCHIQ_SUCCESS;
2324 	char entity[16];
2325 	int *entity_uc;
2326 	int local_uc;
2327 
2328 	if (!arm_state)
2329 		goto out;
2330 
2331 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2332 
2333 	if (use_type == USE_TYPE_VCHIQ) {
2334 		sprintf(entity, "VCHIQ:   ");
2335 		entity_uc = &arm_state->peer_use_count;
2336 	} else if (service) {
2337 		sprintf(entity, "%c%c%c%c:%03d",
2338 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2339 			service->client_id);
2340 		entity_uc = &service->service_use_count;
2341 	} else {
2342 		vchiq_log_error(vchiq_susp_log_level, "%s null service "
2343 				"ptr", __func__);
2344 		ret = VCHIQ_ERROR;
2345 		goto out;
2346 	}
2347 
2348 	write_lock_bh(&arm_state->susp_res_lock);
2349 	local_uc = ++arm_state->videocore_use_count;
2350 	++(*entity_uc);
2351 
2352 	vchiq_log_trace(vchiq_susp_log_level,
2353 		"%s %s count %d, state count %d",
2354 		__func__, entity, *entity_uc, local_uc);
2355 
2356 	write_unlock_bh(&arm_state->susp_res_lock);
2357 
2358 	if (ret == VCHIQ_SUCCESS) {
2359 		enum vchiq_status status = VCHIQ_SUCCESS;
2360 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2361 
2362 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2363 			/* Send the use notify to videocore */
2364 			status = vchiq_send_remote_use_active(state);
2365 			if (status == VCHIQ_SUCCESS)
2366 				ack_cnt--;
2367 			else
2368 				atomic_add(ack_cnt,
2369 					&arm_state->ka_use_ack_count);
2370 		}
2371 	}
2372 
2373 out:
2374 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2375 	return ret;
2376 }
2377 
2378 int
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)2379 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2380 {
2381 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2382 	enum vchiq_status ret = VCHIQ_SUCCESS;
2383 	char entity[16];
2384 	int *entity_uc;
2385 
2386 	if (!arm_state)
2387 		goto out;
2388 
2389 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2390 
2391 	if (service) {
2392 		sprintf(entity, "%c%c%c%c:%03d",
2393 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2394 			service->client_id);
2395 		entity_uc = &service->service_use_count;
2396 	} else {
2397 		sprintf(entity, "PEER:   ");
2398 		entity_uc = &arm_state->peer_use_count;
2399 	}
2400 
2401 	write_lock_bh(&arm_state->susp_res_lock);
2402 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
2403 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
2404 		WARN_ON(!arm_state->videocore_use_count);
2405 		WARN_ON(!(*entity_uc));
2406 		ret = VCHIQ_ERROR;
2407 		goto unlock;
2408 	}
2409 	--arm_state->videocore_use_count;
2410 	--(*entity_uc);
2411 
2412 	vchiq_log_trace(vchiq_susp_log_level,
2413 		"%s %s count %d, state count %d",
2414 		__func__, entity, *entity_uc,
2415 		arm_state->videocore_use_count);
2416 
2417 unlock:
2418 	write_unlock_bh(&arm_state->susp_res_lock);
2419 
2420 out:
2421 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2422 	return ret;
2423 }
2424 
/*
 * Called when VideoCore requests a use: queue it for the keepalive
 * thread by bumping the atomic counter and waking the thread.
 */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

/*
 * Called when VideoCore requests a release: queue it for the keepalive
 * thread by bumping the atomic counter and waking the thread.
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2444 
/* Take a use-count on behalf of @service (service-type use). */
enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

/* Drop a use-count previously taken on behalf of @service. */
enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
2462 
2463 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)2464 vchiq_instance_get_use_count(struct vchiq_instance *instance)
2465 {
2466 	struct vchiq_service *service;
2467 	int use_count = 0, i;
2468 
2469 	i = 0;
2470 	rcu_read_lock();
2471 	while ((service = __next_service_by_instance(instance->state,
2472 						     instance, &i)))
2473 		use_count += service->service_use_count;
2474 	rcu_read_unlock();
2475 	return use_count;
2476 }
2477 
/* Return the pid recorded for the instance's opener. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

/* Return the instance's message-trace flag (0 or 1). */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
2489 
2490 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)2491 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
2492 {
2493 	struct vchiq_service *service;
2494 	int i;
2495 
2496 	i = 0;
2497 	rcu_read_lock();
2498 	while ((service = __next_service_by_instance(instance->state,
2499 						     instance, &i)))
2500 		service->trace = trace;
2501 	rcu_read_unlock();
2502 	instance->trace = (trace != 0);
2503 }
2504 
2505 enum vchiq_status
vchiq_use_service(unsigned int handle)2506 vchiq_use_service(unsigned int handle)
2507 {
2508 	enum vchiq_status ret = VCHIQ_ERROR;
2509 	struct vchiq_service *service = find_service_by_handle(handle);
2510 
2511 	if (service) {
2512 		ret = vchiq_use_internal(service->state, service,
2513 				USE_TYPE_SERVICE);
2514 		unlock_service(service);
2515 	}
2516 	return ret;
2517 }
2518 EXPORT_SYMBOL(vchiq_use_service);
2519 
2520 enum vchiq_status
vchiq_release_service(unsigned int handle)2521 vchiq_release_service(unsigned int handle)
2522 {
2523 	enum vchiq_status ret = VCHIQ_ERROR;
2524 	struct vchiq_service *service = find_service_by_handle(handle);
2525 
2526 	if (service) {
2527 		ret = vchiq_release_internal(service->state, service);
2528 		unlock_service(service);
2529 	}
2530 	return ret;
2531 }
2532 EXPORT_SYMBOL(vchiq_release_service);
2533 
/* Snapshot of one service's identity and use count, for dumping. */
struct service_data_struct {
	int fourcc;	/* service FOURCC */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
2539 
/*
 * Log (via vchiq_log_warning) the use counts of all services plus the
 * peer and overall VideoCore use counts.  Data is snapshotted under the
 * locks into a heap array first so the logging itself happens lock-free.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/* If there's more than 64 services, only dump ones with
	 * non-zero counts */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	/* Snapshot counters under the suspend/resume read lock. */
	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Copy per-service data out under RCU; at most MAX_SERVICES. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	/* Now log the snapshot without holding any locks. */
	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
2615 
/*
 * Verify that @service holds at least one use-count (i.e. callers are
 * using it within a use/release pair).  Returns VCHIQ_SUCCESS if so,
 * otherwise logs an error, dumps all use counts, and returns
 * VCHIQ_ERROR.
 */
enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
2646 
vchiq_platform_conn_state_changed(struct vchiq_state * state,enum vchiq_connstate oldstate,enum vchiq_connstate newstate)2647 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
2648 				       enum vchiq_connstate oldstate,
2649 				       enum vchiq_connstate newstate)
2650 {
2651 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2652 	char threadname[16];
2653 
2654 	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2655 		get_conn_state_name(oldstate), get_conn_state_name(newstate));
2656 	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
2657 		return;
2658 
2659 	write_lock_bh(&arm_state->susp_res_lock);
2660 	if (arm_state->first_connect) {
2661 		write_unlock_bh(&arm_state->susp_res_lock);
2662 		return;
2663 	}
2664 
2665 	arm_state->first_connect = 1;
2666 	write_unlock_bh(&arm_state->susp_res_lock);
2667 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
2668 		 state->id);
2669 	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
2670 					      (void *)state,
2671 					      threadname);
2672 	if (IS_ERR(arm_state->ka_thread)) {
2673 		vchiq_log_error(vchiq_susp_log_level,
2674 				"vchiq: FATAL: couldn't create thread %s",
2675 				threadname);
2676 	} else {
2677 		wake_up_process(arm_state->ka_thread);
2678 	}
2679 }
2680 
/* Device-tree match table: per-SoC drvdata for BCM2835/BCM2836. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2687 
2688 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)2689 vchiq_register_child(struct platform_device *pdev, const char *name)
2690 {
2691 	struct platform_device_info pdevinfo;
2692 	struct platform_device *child;
2693 
2694 	memset(&pdevinfo, 0, sizeof(pdevinfo));
2695 
2696 	pdevinfo.parent = &pdev->dev;
2697 	pdevinfo.name = name;
2698 	pdevinfo.id = PLATFORM_DEVID_NONE;
2699 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
2700 
2701 	child = platform_device_register_full(&pdevinfo);
2702 	if (IS_ERR(child)) {
2703 		dev_warn(&pdev->dev, "%s not registered\n", name);
2704 		child = NULL;
2705 	}
2706 
2707 	return child;
2708 }
2709 
/*
 * Platform probe: fetch the firmware interface, initialise the vchiq
 * platform layer, expose /dev/vchiq, set up debugfs, and register the
 * bcm2835-camera and bcm2835_audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	struct device *vchiq_dev;
	int err;

	/*
	 * NOTE(review): of_match_node() can return NULL, which would make
	 * of_id->data a NULL dereference - probe is only reached via the
	 * match table, but confirm this is guaranteed.
	 */
	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* Defer probing until the firmware driver is ready. */
	drvdata->fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	cdev_init(&vchiq_cdev, &vchiq_fops);
	vchiq_cdev.owner = THIS_MODULE;
	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
	if (err) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to register device");
		goto failed_platform_init;
	}

	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
				  "vchiq");
	if (IS_ERR(vchiq_dev)) {
		err = PTR_ERR(vchiq_dev);
		goto failed_device_create;
	}

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d), device %d.%d",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
		MAJOR(vchiq_devid), MINOR(vchiq_devid));

	/* Child registration failures are non-fatal (see helper). */
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_device_create:
	cdev_del(&vchiq_cdev);
failed_platform_init:
	/* NOTE(review): vchiq_platform_init() is not unwound here. */
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2775 
/*
 * Platform remove: tear down in reverse order of probe - child devices,
 * debugfs, the /dev/vchiq device node, and the char device.
 */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	cdev_del(&vchiq_cdev);

	return 0;
}
2786 
/* Platform driver binding vchiq_probe/vchiq_remove to the DT matches. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
2795 
/*
 * Module init: create the device class, reserve a char-device region,
 * then register the platform driver.  Each failure unwinds the earlier
 * steps via the goto ladder.
 */
static int __init vchiq_driver_init(void)
{
	int ret;

	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
	if (IS_ERR(vchiq_class)) {
		pr_err("Failed to create vchiq class\n");
		return PTR_ERR(vchiq_class);
	}

	ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
	if (ret) {
		pr_err("Failed to allocate vchiq's chrdev region\n");
		goto class_destroy;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		goto region_unregister;
	}

	return 0;

region_unregister:
	unregister_chrdev_region(vchiq_devid, 1);

class_destroy:
	class_destroy(vchiq_class);

	return ret;
}
module_init(vchiq_driver_init);
2829 
/* Module exit: undo vchiq_driver_init() in reverse order. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
2837 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
2841