1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <soc/bcm2835/raspberrypi-firmware.h>
29
30 #include "vchiq_core.h"
31 #include "vchiq_ioctl.h"
32 #include "vchiq_arm.h"
33 #include "vchiq_debugfs.h"
34
35 #define DEVICE_NAME "vchiq"
36
37 /* Override the default prefix, which would be vchiq_arm (from the filename) */
38 #undef MODULE_PARAM_PREFIX
39 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
40
41 /* Some per-instance constants */
42 #define MAX_COMPLETIONS 128
43 #define MAX_SERVICES 64
44 #define MAX_ELEMENTS 8
45 #define MSG_QUEUE_SIZE 128
46
47 #define KEEPALIVE_VER 1
48 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
49
50 /* Run time control of log level, based on KERN_XXX level. */
51 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
52 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
53
/*
 * Per-service bookkeeping for a service opened through this driver; the
 * service's base.userdata points here (see service_callback()).
 */
struct user_service {
	struct vchiq_service *service;
	void __user *userdata;		/* opaque pointer supplied by the client */
	struct vchiq_instance *instance;
	char is_vchi;			/* non-zero: messages buffered in msg_queue */
	char dequeue_pending;		/* a DEQUEUE_MESSAGE is waiting for a message */
	char close_pending;		/* CLOSED completion not yet acknowledged */
	int message_available_pos;	/* completion index of last MESSAGE_AVAILABLE */
	int msg_insert;			/* msg_queue producer index (free-running) */
	int msg_remove;			/* msg_queue consumer index (free-running) */
	struct completion insert_event;	/* signalled when a message is queued */
	struct completion remove_event;	/* signalled when a message is dequeued */
	struct completion close_event;	/* signalled by close_delivered() */
	/* Ring buffer of pending message headers; indices are masked with
	 * (MSG_QUEUE_SIZE - 1) on access. */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};
69
/*
 * Parked blocking bulk transfer: kept on instance->bulk_waiter_list when a
 * blocking transfer returns VCHIQ_RETRY, so the same thread (matched by
 * pid) can resume it later.
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* owning thread, matched on retry */
	struct list_head list;		/* link on instance->bulk_waiter_list */
};
75
/*
 * Per-client state: one instance per kernel-side user or per open of the
 * character device.
 */
struct vchiq_instance {
	struct vchiq_state *state;
	/* Circular completion queue; insert/remove indices are free-running
	 * and masked with (MAX_COMPLETIONS - 1) on access. */
	struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct completion insert_event;	/* signalled when a completion is added */
	struct completion remove_event;	/* signalled when a completion is consumed */
	struct mutex completion_mutex;

	int connected;		/* set once vchiq_connect() succeeds */
	int closing;		/* teardown in progress; callbacks are dropped */
	int pid;
	int mark;
	int use_close_delivered; /* client acks CLOSED via CLOSE_DELIVERED */
	int trace;

	struct list_head bulk_waiter_list;	/* parked blocking bulk waiters */
	struct mutex bulk_waiter_list_mutex;	/* guards bulk_waiter_list */

	struct vchiq_debugfs_node debugfs_node;
};
97
/* State carried through the state-dump helpers writing into a user buffer. */
struct dump_context {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* bytes produced so far */
	size_t space;		/* space remaining in buf */
	loff_t offset;		/* file offset where dumping starts */
};
104
static struct cdev vchiq_cdev;		/* character device backing /dev/vchiq */
static dev_t vchiq_devid;
static struct vchiq_state g_state;	/* the single VideoCore connection state */
static struct class *vchiq_class;
/* Protects every user_service msg_queue and its insert/remove indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/* Per-SoC cache line sizes — presumably selected via device match data in
 * the probe path; confirm against the of_device_id table. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
120
/* Human-readable ioctl names, indexed by ioctl number, for trace logging. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* Keep the table above in step with the ioctl numbering in vchiq_ioctl.h. */
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));
144
145 static enum vchiq_status
146 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
147 unsigned int size, enum vchiq_bulk_dir dir);
148
149 #define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_instance ** instance_out)150 enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
151 {
152 enum vchiq_status status = VCHIQ_ERROR;
153 struct vchiq_state *state;
154 struct vchiq_instance *instance = NULL;
155 int i;
156
157 vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
158
159 /* VideoCore may not be ready due to boot up timing.
160 * It may never be ready if kernel and firmware are mismatched,so don't
161 * block forever.
162 */
163 for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
164 state = vchiq_get_state();
165 if (state)
166 break;
167 usleep_range(500, 600);
168 }
169 if (i == VCHIQ_INIT_RETRIES) {
170 vchiq_log_error(vchiq_core_log_level,
171 "%s: videocore not initialized\n", __func__);
172 goto failed;
173 } else if (i > 0) {
174 vchiq_log_warning(vchiq_core_log_level,
175 "%s: videocore initialized after %d retries\n",
176 __func__, i);
177 }
178
179 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
180 if (!instance) {
181 vchiq_log_error(vchiq_core_log_level,
182 "%s: error allocating vchiq instance\n", __func__);
183 goto failed;
184 }
185
186 instance->connected = 0;
187 instance->state = state;
188 mutex_init(&instance->bulk_waiter_list_mutex);
189 INIT_LIST_HEAD(&instance->bulk_waiter_list);
190
191 *instance_out = instance;
192
193 status = VCHIQ_SUCCESS;
194
195 failed:
196 vchiq_log_trace(vchiq_core_log_level,
197 "%s(%p): returning %d", __func__, instance, status);
198
199 return status;
200 }
201 EXPORT_SYMBOL(vchiq_initialise);
202
/*
 * Shut down and free an instance obtained from vchiq_initialise().
 *
 * Removes all of the instance's services under the state mutex.  On
 * success, any bulk_waiter_nodes left by interrupted blocking transfers
 * are freed along with the instance itself; on failure the instance is
 * left intact.  Returns VCHIQ_RETRY if a signal interrupts acquisition
 * of the state mutex.
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		/* Reap waiters parked by vchiq_blocking_bulk_transfer(). */
		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
239
vchiq_is_connected(struct vchiq_instance * instance)240 static int vchiq_is_connected(struct vchiq_instance *instance)
241 {
242 return instance->connected;
243 }
244
/*
 * Perform the connection handshake with the VideoCore for this instance.
 * Must succeed before services can be opened.  Marks the instance as
 * connected on success; returns VCHIQ_RETRY if a signal interrupts
 * acquisition of the state mutex.
 */
enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level,
			"%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);
273
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)274 static enum vchiq_status vchiq_add_service(
275 struct vchiq_instance *instance,
276 const struct vchiq_service_params_kernel *params,
277 unsigned int *phandle)
278 {
279 enum vchiq_status status;
280 struct vchiq_state *state = instance->state;
281 struct vchiq_service *service = NULL;
282 int srvstate;
283
284 vchiq_log_trace(vchiq_core_log_level,
285 "%s(%p) called", __func__, instance);
286
287 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
288
289 srvstate = vchiq_is_connected(instance)
290 ? VCHIQ_SRVSTATE_LISTENING
291 : VCHIQ_SRVSTATE_HIDDEN;
292
293 service = vchiq_add_service_internal(
294 state,
295 params,
296 srvstate,
297 instance,
298 NULL);
299
300 if (service) {
301 *phandle = service->handle;
302 status = VCHIQ_SUCCESS;
303 } else
304 status = VCHIQ_ERROR;
305
306 vchiq_log_trace(vchiq_core_log_level,
307 "%s(%p): returning %d", __func__, instance, status);
308
309 return status;
310 }
311
/*
 * Open a client-side service on a connected instance.  Creates the
 * service in OPENING state and completes the open handshake; on failure
 * of the handshake the half-created service is removed again.  On
 * success *phandle receives the service handle.
 */
enum vchiq_status vchiq_open_service(
	struct vchiq_instance *instance,
	const struct vchiq_service_params_kernel *params,
	unsigned int *phandle)
{
	enum vchiq_status status = VCHIQ_ERROR;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	/* Opening requires a completed connect handshake. */
	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
					     params,
					     VCHIQ_SRVSTATE_OPENING,
					     instance,
					     NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			/* Undo the partial open and invalidate the handle. */
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
351
352 enum vchiq_status
vchiq_bulk_transmit(unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)353 vchiq_bulk_transmit(unsigned int handle, const void *data,
354 unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
355 {
356 enum vchiq_status status;
357
358 while (1) {
359 switch (mode) {
360 case VCHIQ_BULK_MODE_NOCALLBACK:
361 case VCHIQ_BULK_MODE_CALLBACK:
362 status = vchiq_bulk_transfer(handle,
363 (void *)data, NULL,
364 size, userdata, mode,
365 VCHIQ_BULK_TRANSMIT);
366 break;
367 case VCHIQ_BULK_MODE_BLOCKING:
368 status = vchiq_blocking_bulk_transfer(handle,
369 (void *)data, size, VCHIQ_BULK_TRANSMIT);
370 break;
371 default:
372 return VCHIQ_ERROR;
373 }
374
375 /*
376 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
377 * to implement a retry mechanism since this function is
378 * supposed to block until queued
379 */
380 if (status != VCHIQ_RETRY)
381 break;
382
383 msleep(1);
384 }
385
386 return status;
387 }
388 EXPORT_SYMBOL(vchiq_bulk_transmit);
389
vchiq_bulk_receive(unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)390 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
391 unsigned int size, void *userdata,
392 enum vchiq_bulk_mode mode)
393 {
394 enum vchiq_status status;
395
396 while (1) {
397 switch (mode) {
398 case VCHIQ_BULK_MODE_NOCALLBACK:
399 case VCHIQ_BULK_MODE_CALLBACK:
400 status = vchiq_bulk_transfer(handle, data, NULL,
401 size, userdata,
402 mode, VCHIQ_BULK_RECEIVE);
403 break;
404 case VCHIQ_BULK_MODE_BLOCKING:
405 status = vchiq_blocking_bulk_transfer(handle,
406 (void *)data, size, VCHIQ_BULK_RECEIVE);
407 break;
408 default:
409 return VCHIQ_ERROR;
410 }
411
412 /*
413 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
414 * to implement a retry mechanism since this function is
415 * supposed to block until queued
416 */
417 if (status != VCHIQ_RETRY)
418 break;
419
420 msleep(1);
421 }
422
423 return status;
424 }
425 EXPORT_SYMBOL(vchiq_bulk_receive);
426
427 static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle,void * data,unsigned int size,enum vchiq_bulk_dir dir)428 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
429 unsigned int size, enum vchiq_bulk_dir dir)
430 {
431 struct vchiq_instance *instance;
432 struct vchiq_service *service;
433 enum vchiq_status status;
434 struct bulk_waiter_node *waiter = NULL;
435 bool found = false;
436
437 service = find_service_by_handle(handle);
438 if (!service)
439 return VCHIQ_ERROR;
440
441 instance = service->instance;
442
443 unlock_service(service);
444
445 mutex_lock(&instance->bulk_waiter_list_mutex);
446 list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
447 if (waiter->pid == current->pid) {
448 list_del(&waiter->list);
449 found = true;
450 break;
451 }
452 }
453 mutex_unlock(&instance->bulk_waiter_list_mutex);
454
455 if (found) {
456 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
457
458 if (bulk) {
459 /* This thread has an outstanding bulk transfer. */
460 /* FIXME: why compare a dma address to a pointer? */
461 if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
462 (bulk->size != size)) {
463 /* This is not a retry of the previous one.
464 * Cancel the signal when the transfer
465 * completes.
466 */
467 spin_lock(&bulk_waiter_spinlock);
468 bulk->userdata = NULL;
469 spin_unlock(&bulk_waiter_spinlock);
470 }
471 }
472 } else {
473 waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
474 if (!waiter) {
475 vchiq_log_error(vchiq_core_log_level,
476 "%s - out of memory", __func__);
477 return VCHIQ_ERROR;
478 }
479 }
480
481 status = vchiq_bulk_transfer(handle, data, NULL, size,
482 &waiter->bulk_waiter,
483 VCHIQ_BULK_MODE_BLOCKING, dir);
484 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
485 !waiter->bulk_waiter.bulk) {
486 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
487
488 if (bulk) {
489 /* Cancel the signal when the transfer
490 * completes.
491 */
492 spin_lock(&bulk_waiter_spinlock);
493 bulk->userdata = NULL;
494 spin_unlock(&bulk_waiter_spinlock);
495 }
496 kfree(waiter);
497 } else {
498 waiter->pid = current->pid;
499 mutex_lock(&instance->bulk_waiter_list_mutex);
500 list_add(&waiter->list, &instance->bulk_waiter_list);
501 mutex_unlock(&instance->bulk_waiter_list_mutex);
502 vchiq_log_info(vchiq_arm_log_level,
503 "saved bulk_waiter %pK for pid %d",
504 waiter, current->pid);
505 }
506
507 return status;
508 }
/****************************************************************************
 *
 * add_completion
 *
 * Append a completion record to the instance's circular completion queue
 * for later collection by AWAIT_COMPLETION.  Blocks (interruptibly) while
 * the queue is full; returns VCHIQ_RETRY if interrupted, VCHIQ_SUCCESS if
 * the instance started closing while waiting or once the record is queued.
 *
 ***************************************************************************/

static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	struct vchiq_header *header, struct user_service *user_service,
	void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
575
/****************************************************************************
 *
 * service_callback
 *
 * Core-side callback for every service created through this driver.
 * Routes each event to the owning client: for vchi services, message
 * headers are buffered in the user_service msg_queue (blocking while the
 * queue is full) and the completion queue is only used to signal
 * availability; for everything else the event goes straight into the
 * instance's completion queue via add_completion().
 *
 ***************************************************************************/

static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
	unsigned int handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* A closing instance no longer collects events; drop them. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Block while the per-service message queue is full. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header now lives in msg_queue; don't also pass it to
		 * the completion record below. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
688
/****************************************************************************
 *
 * user_service_free
 *
 * Service userdata destructor: the userdata is a kmalloc'd user_service
 * record (see vchiq_ioc_create_service()), so simply release it.
 *
 ***************************************************************************/
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
699
/****************************************************************************
 *
 * close_delivered
 *
 * Called once a CLOSED completion has been handed to (and, when
 * use_close_delivered is set, acknowledged by) the client.  Drops the
 * extra service reference taken in add_completion() and wakes any thread
 * blocked waiting for the close.
 *
 ***************************************************************************/
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"%s(handle=%x)",
		__func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
721
/* Cursor over a user-supplied element array, advanced by each invocation
 * of vchiq_ioc_copy_element_data(). */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element being copied */
	size_t element_offset;		/* bytes of *element already consumed */
	unsigned long elements_to_go;	/* elements remaining, incl. current */
};
727
/*
 * Copy-callback for vchiq_queue_message(): gathers up to @maxsize bytes
 * from the user-space element array tracked by @context into @dest.
 * Returns the number of bytes copied (less than @maxsize only when the
 * elements are exhausted), or -EFAULT on a failed user copy.  @offset is
 * unused; the context keeps its own cursor.
 */
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		/* Skip zero-length elements. */
		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		/* Element fully consumed: advance to the next one. */
		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}
766
767 /**************************************************************************
768 *
769 * vchiq_ioc_queue_message
770 *
771 **************************************************************************/
772 static int
vchiq_ioc_queue_message(unsigned int handle,struct vchiq_element * elements,unsigned long count)773 vchiq_ioc_queue_message(unsigned int handle,
774 struct vchiq_element *elements,
775 unsigned long count)
776 {
777 struct vchiq_io_copy_callback_context context;
778 enum vchiq_status status = VCHIQ_SUCCESS;
779 unsigned long i;
780 size_t total_size = 0;
781
782 context.element = elements;
783 context.element_offset = 0;
784 context.elements_to_go = count;
785
786 for (i = 0; i < count; i++) {
787 if (!elements[i].data && elements[i].size != 0)
788 return -EFAULT;
789
790 total_size += elements[i].size;
791 }
792
793 status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
794 &context, total_size);
795
796 if (status == VCHIQ_ERROR)
797 return -EIO;
798 else if (status == VCHIQ_RETRY)
799 return -EINTR;
800 return 0;
801 }
802
/*
 * CREATE_SERVICE ioctl backend: allocate a user_service record, create
 * the underlying service (open or listening depending on args->is_open
 * and the connection state), and complete the open handshake if
 * requested.  On success args->handle is filled in.  The user_service
 * record is owned by the service from vchiq_add_service_internal()
 * onwards and is released via the user_service_free terminator.
 */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service_params_kernel params;
	int srvstate;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		/* Opening a service requires a completed connect. */
		if (!instance->connected) {
			kfree(user_service);
			return -ENOTCONN;
		}
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc   = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version  = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	/* Start "behind" the completion queue so no stale
	 * MESSAGE_AVAILABLE position is seen as current. */
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status != VCHIQ_SUCCESS) {
			/* user_service is freed via the service's
			 * user_service_free terminator — presumably
			 * triggered by vchiq_remove_service(); confirm
			 * against vchiq_core. */
			vchiq_remove_service(service->handle);
			return (status == VCHIQ_RETRY) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}
867
/*
 * DEQUEUE_MESSAGE ioctl backend: pop the next buffered message header for
 * a vchi service, optionally blocking until one arrives, and copy its
 * payload to args->buf (if non-NULL).  Returns the message size on
 * success, -EWOULDBLOCK when non-blocking and empty, -EINTR if
 * interrupted, -ENOTCONN for a NULL (disconnect) header, -EMSGSIZE when
 * the buffer is too small, or -EINVAL/-EFAULT on bad arguments.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local)
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	/* Only vchi services buffer messages in msg_queue. */
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		/* Tell service_callback() a dequeuer is waiting so it can
		 * bypass the completion queue. */
		user_service->dequeue_pending = 1;
		ret = 0;
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(
				&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove ==
			user_service->msg_insert);

		if (ret)
			goto out;
	}

	/* The consumer index must never pass the producer index. */
	BUG_ON((int)(user_service->msg_insert -
		user_service->msg_remove) < 0);

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake a service_callback() blocked on a full msg_queue. */
	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf,
					header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(service->handle, header);
		} else
			ret = -EFAULT;
	} else {
		vchiq_log_error(vchiq_arm_log_level,
			"header %pK: bufsize %x < size %x",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	unlock_service(service);
	return ret;
}
947
/*
 * QUEUE_BULK_TRANSMIT/RECEIVE ioctl backend.
 *
 * BLOCKING mode allocates a bulk_waiter; WAITING mode instead resumes a
 * waiter previously parked for this pid (interrupted blocking transfer).
 * If the transfer is interrupted again with a bulk still outstanding, the
 * waiter is (re-)parked on instance->bulk_waiter_list and the user's mode
 * field is rewritten to VCHIQ_BULK_MODE_WAITING so userspace knows to
 * retry.  Returns 0, or -EINVAL/-ENOMEM/-ESRCH/-EIO/-EINTR/-EFAULT-style
 * errors mapped from the vchiq status.
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node),
			GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Find the waiter parked for this thread's interrupted
		 * blocking transfer. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
				"no bulk_waiter found for pid %d",
				current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			"found bulk_waiter %pK for pid %d", waiter,
			current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		userdata = args->userdata;
	}

	/*
	 * FIXME address space mismatch:
	 * args->data may be interpreted as a kernel pointer
	 * in create_pagelist() called from vchiq_bulk_transfer(),
	 * accessing kernel data instead of user space, based on the
	 * address.
	 */
	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	/* Non-blocking/non-waiting modes have nothing to park or free. */
	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer
			** completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %pK for pid %d",
			waiter, current->pid);

		/* Tell userspace to come back with MODE_WAITING. */
		ret = put_user(mode_waiting, mode);
	}
out:
	unlock_service(service);
	/* A local error (ret) takes precedence over the vchiq status. */
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
1048
/*
 * Read entry @index of an array of user pointers in user space,
 * transparently handling 32-bit (compat) callers whose pointers are
 * compat_uptr_t sized.  Stores the decoded pointer in *buf and returns
 * 0, or the -EFAULT from get_user().
 */
static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
{
	int ret;

	if (in_compat_syscall()) {
		compat_uptr_t ptr32;
		compat_uptr_t __user *uptr = ubuf;
		ret = get_user(ptr32, uptr + index);
		*buf = compat_ptr(ptr32);
	} else {
		uintptr_t ptr, __user *uptr = ubuf;
		ret = get_user(ptr, uptr + index);
		*buf = (void __user *)ptr;
	}

	return ret;
}
1067
/* 32-bit (compat) layout of struct vchiq_completion_data: pointers are
 * shrunk to compat_uptr_t for copying out to 32-bit userspace. */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
1074
/*
 * Copy one completion record to slot @index of the user's array,
 * converting pointers to the 32-bit layout for compat callers.
 * Returns 0 or -EFAULT.
 */
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;

	if (in_compat_syscall()) {
		struct vchiq_completion_data32 tmp = {
			.reason		  = completion->reason,
			.header		  = ptr_to_compat(completion->header),
			.service_userdata = ptr_to_compat(completion->service_userdata),
			.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
		};
		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
			return -EFAULT;
	} else {
		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
			return -EFAULT;
	}

	return 0;
}
1097
/*
 * Handler for VCHIQ_IOC_AWAIT_COMPLETION.  Blocks until at least one
 * completion is queued on @instance (or the instance is closing), then
 * drains up to args->count completion records to user space, copying
 * any pending message payloads into the caller-supplied message
 * buffers (args->msgbufs).
 *
 * Returns the number of completions delivered (>= 0), or a negative
 * errno.  On exit the consumed msgbuf count is written back through
 * @msgbufcountp so user space knows which buffers were used.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected) {
		return -ENOTCONN;
	}

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/*
	 * Wait for the completion ring to become non-empty.  The mutex is
	 * dropped around the wait so the producer (service callback) can
	 * insert completions and signal insert_event.
	 */
	while ((instance->completion_remove ==
		instance->completion_insert)
		&& !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(
					&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				       "AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions delivered so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[
			remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		/* Build the user-visible record; zero first so no kernel
		 * stack bytes leak through padding. */
		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
					"header %pK: msgbufsize %x < msglen %x",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			/* Stall here for lack of a buffer for the message;
			 * completions already copied are still returned. */
			if (msgbufcount <= 0)
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
					       msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message
			 * can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the
			 * msgbuf. */
			user_completion.header = msgbuf;
		}

		/* Unless close-delivered handshaking is in use, the service
		 * reference taken for the CLOSED completion is dropped now. */
		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			unlock_service(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report back how many message buffers remain unused. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	/* On error, wake the slot handler in case the ring was full. */
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
1249
1250 /****************************************************************************
1251 *
1252 * vchiq_ioctl
1253 *
1254 ***************************************************************************/
/*
 * Native ioctl entry point for the vchiq character device.  Decodes the
 * command, performs the request on behalf of the calling instance, and
 * maps the internal enum vchiq_status onto a POSIX errno on the way
 * out (VCHIQ_ERROR -> -EIO, VCHIQ_RETRY -> -EINTR).
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - instance %pK, cmd %s, arg %lx",
			__func__, instance,
			((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
			 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance, &i))) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		/* A second connect on the same instance is an error. */
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: connect: could not lock mutex for "
					"state %d: %d",
					instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Hand the new handle back; undo the creation if the
		 * write to user space faults. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/* close_pending is false on first entry, and when the
		   wait in vchiq_close_service has been interrupted. */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/* close_pending is true once the underlying service
		   has been closed until the client library calls the
		   CLOSE_DELIVERED ioctl, signalling close_event. */
		if (user_service->close_pending &&
		    wait_for_completion_interruptible(
			    &user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			/* Adjust the service's use count for power
			 * management (keepalive) purposes. */
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				 vchiq_use_service_internal(service) :
				 vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s: cmd %s returned error %d for "
						"service %c%c%c%c:%03d",
						__func__,
						(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
						status,
						VCHIQ_FOURCC_AS_4CHARS(
							service->base.fourcc),
						service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		/* &argp->mode lets the helper report the effective bulk
		 * mode (e.g. WAITING) back to user space. */
		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* The caller may ask for a prefix of the config, but
		 * never more than the kernel's structure. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		/* Record library capabilities; newer libraries use the
		 * CLOSE_DELIVERED handshake. */
		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	/* Drop the reference taken by find_service_for_instance() (and
	 * friends) for whichever case ran. */
	if (service)
		unlock_service(service);

	/* Translate a vchiq status into an errno unless a case already
	 * set an explicit error. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
	    (ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			       "  ioctl instance %pK, cmd %s -> status %d, %ld",
			       instance,
			       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			       ioctl_names[_IOC_NR(cmd)] :
			       "<invalid>",
			       status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
				"  ioctl instance %pK, cmd %s -> status %d, %ld",
				instance,
				(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
				status, ret);

	return ret;
}
1572
1573 #if defined(CONFIG_COMPAT)
1574
/*
 * 32-bit (compat) layout of struct vchiq_service_params: the callback
 * and userdata pointers are carried as compat_uptr_t so the structure
 * has the same size for 32-bit user space and a 64-bit kernel.
 */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version;       /* Increment for non-trivial changes */
	short version_min;   /* Update for incompatible changes */
};

/* 32-bit (compat) layout of struct vchiq_create_service. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

/* Compat ioctl number; differs from the native one via the 32-bit
 * payload size encoded by _IOWR. */
#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1592
1593 static long
vchiq_compat_ioctl_create_service(struct file * file,unsigned int cmd,struct vchiq_create_service32 __user * ptrargs32)1594 vchiq_compat_ioctl_create_service(
1595 struct file *file,
1596 unsigned int cmd,
1597 struct vchiq_create_service32 __user *ptrargs32)
1598 {
1599 struct vchiq_create_service args;
1600 struct vchiq_create_service32 args32;
1601 long ret;
1602
1603 if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1604 return -EFAULT;
1605
1606 args = (struct vchiq_create_service) {
1607 .params = {
1608 .fourcc = args32.params.fourcc,
1609 .callback = compat_ptr(args32.params.callback),
1610 .userdata = compat_ptr(args32.params.userdata),
1611 .version = args32.params.version,
1612 .version_min = args32.params.version_min,
1613 },
1614 .is_open = args32.is_open,
1615 .is_vchi = args32.is_vchi,
1616 .handle = args32.handle,
1617 };
1618
1619 ret = vchiq_ioc_create_service(file->private_data, &args);
1620 if (ret < 0)
1621 return ret;
1622
1623 if (put_user(args.handle, &ptrargs32->handle)) {
1624 vchiq_remove_service(args.handle);
1625 return -EFAULT;
1626 }
1627
1628 return 0;
1629 }
1630
/* 32-bit (compat) layout of struct vchiq_element: the data pointer is
 * a compat_uptr_t. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit (compat) layout of struct vchiq_queue_message. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

/* Compat ioctl number for QUEUE_MESSAGE (32-bit payload size). */
#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
1644
1645 static long
vchiq_compat_ioctl_queue_message(struct file * file,unsigned int cmd,struct vchiq_queue_message32 __user * arg)1646 vchiq_compat_ioctl_queue_message(struct file *file,
1647 unsigned int cmd,
1648 struct vchiq_queue_message32 __user *arg)
1649 {
1650 struct vchiq_queue_message args;
1651 struct vchiq_queue_message32 args32;
1652 struct vchiq_service *service;
1653 int ret;
1654
1655 if (copy_from_user(&args32, arg, sizeof(args32)))
1656 return -EFAULT;
1657
1658 args = (struct vchiq_queue_message) {
1659 .handle = args32.handle,
1660 .count = args32.count,
1661 .elements = compat_ptr(args32.elements),
1662 };
1663
1664 if (args32.count > MAX_ELEMENTS)
1665 return -EINVAL;
1666
1667 service = find_service_for_instance(file->private_data, args.handle);
1668 if (!service)
1669 return -EINVAL;
1670
1671 if (args32.elements && args32.count) {
1672 struct vchiq_element32 element32[MAX_ELEMENTS];
1673 struct vchiq_element elements[MAX_ELEMENTS];
1674 unsigned int count;
1675
1676 if (copy_from_user(&element32, args.elements,
1677 sizeof(element32))) {
1678 unlock_service(service);
1679 return -EFAULT;
1680 }
1681
1682 for (count = 0; count < args32.count; count++) {
1683 elements[count].data =
1684 compat_ptr(element32[count].data);
1685 elements[count].size = element32[count].size;
1686 }
1687 ret = vchiq_ioc_queue_message(args.handle, elements,
1688 args.count);
1689 } else {
1690 ret = -EINVAL;
1691 }
1692 unlock_service(service);
1693
1694 return ret;
1695 }
1696
/* 32-bit (compat) layout of struct vchiq_queue_bulk_transfer. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

/* Compat ioctl numbers for the two bulk directions (32-bit payload). */
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1709
1710 static long
vchiq_compat_ioctl_queue_bulk(struct file * file,unsigned int cmd,struct vchiq_queue_bulk_transfer32 __user * argp)1711 vchiq_compat_ioctl_queue_bulk(struct file *file,
1712 unsigned int cmd,
1713 struct vchiq_queue_bulk_transfer32 __user *argp)
1714 {
1715 struct vchiq_queue_bulk_transfer32 args32;
1716 struct vchiq_queue_bulk_transfer args;
1717 enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1718 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1719
1720 if (copy_from_user(&args32, argp, sizeof(args32)))
1721 return -EFAULT;
1722
1723 args = (struct vchiq_queue_bulk_transfer) {
1724 .handle = args32.handle,
1725 .data = compat_ptr(args32.data),
1726 .size = args32.size,
1727 .userdata = compat_ptr(args32.userdata),
1728 .mode = args32.mode,
1729 };
1730
1731 return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1732 dir, &argp->mode);
1733 }
1734
/* 32-bit (compat) layout of struct vchiq_await_completion. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

/* Compat ioctl number for AWAIT_COMPLETION (32-bit payload size). */
#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1745
1746 static long
vchiq_compat_ioctl_await_completion(struct file * file,unsigned int cmd,struct vchiq_await_completion32 __user * argp)1747 vchiq_compat_ioctl_await_completion(struct file *file,
1748 unsigned int cmd,
1749 struct vchiq_await_completion32 __user *argp)
1750 {
1751 struct vchiq_await_completion args;
1752 struct vchiq_await_completion32 args32;
1753
1754 if (copy_from_user(&args32, argp, sizeof(args32)))
1755 return -EFAULT;
1756
1757 args = (struct vchiq_await_completion) {
1758 .count = args32.count,
1759 .buf = compat_ptr(args32.buf),
1760 .msgbufsize = args32.msgbufsize,
1761 .msgbufcount = args32.msgbufcount,
1762 .msgbufs = compat_ptr(args32.msgbufs),
1763 };
1764
1765 return vchiq_ioc_await_completion(file->private_data, &args,
1766 &argp->msgbufcount);
1767 }
1768
/* 32-bit (compat) layout of struct vchiq_dequeue_message. */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

/* Compat ioctl number for DEQUEUE_MESSAGE (32-bit payload size). */
#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1778
1779 static long
vchiq_compat_ioctl_dequeue_message(struct file * file,unsigned int cmd,struct vchiq_dequeue_message32 __user * arg)1780 vchiq_compat_ioctl_dequeue_message(struct file *file,
1781 unsigned int cmd,
1782 struct vchiq_dequeue_message32 __user *arg)
1783 {
1784 struct vchiq_dequeue_message32 args32;
1785 struct vchiq_dequeue_message args;
1786
1787 if (copy_from_user(&args32, arg, sizeof(args32)))
1788 return -EFAULT;
1789
1790 args = (struct vchiq_dequeue_message) {
1791 .handle = args32.handle,
1792 .blocking = args32.blocking,
1793 .bufsize = args32.bufsize,
1794 .buf = compat_ptr(args32.buf),
1795 };
1796
1797 return vchiq_ioc_dequeue_message(file->private_data, &args);
1798 }
1799
/* 32-bit (compat) layout of struct vchiq_get_config. */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

/* Compat ioctl number for GET_CONFIG (32-bit payload size). */
#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1807
1808 static long
vchiq_compat_ioctl_get_config(struct file * file,unsigned int cmd,struct vchiq_get_config32 __user * arg)1809 vchiq_compat_ioctl_get_config(struct file *file,
1810 unsigned int cmd,
1811 struct vchiq_get_config32 __user *arg)
1812 {
1813 struct vchiq_get_config32 args32;
1814 struct vchiq_config config;
1815 void __user *ptr;
1816
1817 if (copy_from_user(&args32, arg, sizeof(args32)))
1818 return -EFAULT;
1819 if (args32.config_size > sizeof(config))
1820 return -EINVAL;
1821
1822 vchiq_get_config(&config);
1823 ptr = compat_ptr(args32.pconfig);
1824 if (copy_to_user(ptr, &config, args32.config_size))
1825 return -EFAULT;
1826
1827 return 0;
1828 }
1829
1830 static long
vchiq_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1831 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1832 {
1833 void __user *argp = compat_ptr(arg);
1834 switch (cmd) {
1835 case VCHIQ_IOC_CREATE_SERVICE32:
1836 return vchiq_compat_ioctl_create_service(file, cmd, argp);
1837 case VCHIQ_IOC_QUEUE_MESSAGE32:
1838 return vchiq_compat_ioctl_queue_message(file, cmd, argp);
1839 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1840 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1841 return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
1842 case VCHIQ_IOC_AWAIT_COMPLETION32:
1843 return vchiq_compat_ioctl_await_completion(file, cmd, argp);
1844 case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1845 return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
1846 case VCHIQ_IOC_GET_CONFIG32:
1847 return vchiq_compat_ioctl_get_config(file, cmd, argp);
1848 default:
1849 return vchiq_ioctl(file, cmd, (unsigned long)argp);
1850 }
1851 }
1852
1853 #endif
1854
/*
 * open() handler for the vchiq device: allocates a per-open
 * vchiq_instance, binds it to the global state and stashes it in
 * file->private_data.  Fails with -ENOTCONN if the VideoCore link is
 * not up yet.
 */
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	/* tgid, not pid: the instance belongs to the process, not the
	 * opening thread. */
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}
1887
/*
 * release() handler for the vchiq device: tears down everything the
 * instance owns — wakes any completion waiter, terminates all of the
 * instance's services and waits for them to reach the FREE state,
 * drains per-service message queues and the completion ring, and frees
 * any orphaned bulk waiters.  The ordering of these steps matters;
 * each stage unblocks threads that the next stage waits on.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		unlock_service(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

		spin_lock(&msg_queue_spinlock);

		/* Release every message still queued on this service.
		 * The lock is dropped around vchiq_release_message()
		 * because it may sleep. */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		unlock_service(service);
	}

	/* Release any closed services */
	while (instance->completion_remove !=
	       instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
				service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			unlock_service(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	/* Free bulk waiters saved by interrupted blocking transfers
	 * that were never reclaimed by a retry. */
	{
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				       "bulk_waiter - cleaned up %pK for pid %d",
				       waiter, waiter->pid);
			kfree(waiter);
		}
	}

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
2004
2005 /****************************************************************************
2006 *
2007 * vchiq_dump
2008 *
2009 ***************************************************************************/
2010
/*
 * Copy one dump fragment (@str, @len bytes) into the user buffer
 * described by @dump_context, honouring the caller's read offset and
 * remaining space.  Returns 0 (including when the fragment is wholly
 * skipped or the buffer is full) or -EFAULT on a user-copy fault.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* Buffer already full: silently discard the rest of the dump. */
	if (context->actual >= context->space)
		return 0;

	/* Consume the read offset (from *ppos) before emitting bytes,
	 * so repeated reads continue where the previous one stopped. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
2051
/****************************************************************************
 *
 *   vchiq_dump_platform_instances
 *
 ***************************************************************************/
2057
vchiq_dump_platform_instances(void * dump_context)2058 int vchiq_dump_platform_instances(void *dump_context)
2059 {
2060 struct vchiq_state *state = vchiq_get_state();
2061 char buf[80];
2062 int len;
2063 int i;
2064
2065 /* There is no list of instances, so instead scan all services,
2066 marking those that have been dumped. */
2067
2068 rcu_read_lock();
2069 for (i = 0; i < state->unused_service; i++) {
2070 struct vchiq_service *service;
2071 struct vchiq_instance *instance;
2072
2073 service = rcu_dereference(state->services[i]);
2074 if (!service || service->base.callback != service_callback)
2075 continue;
2076
2077 instance = service->instance;
2078 if (instance)
2079 instance->mark = 0;
2080 }
2081 rcu_read_unlock();
2082
2083 for (i = 0; i < state->unused_service; i++) {
2084 struct vchiq_service *service;
2085 struct vchiq_instance *instance;
2086 int err;
2087
2088 rcu_read_lock();
2089 service = rcu_dereference(state->services[i]);
2090 if (!service || service->base.callback != service_callback) {
2091 rcu_read_unlock();
2092 continue;
2093 }
2094
2095 instance = service->instance;
2096 if (!instance || instance->mark) {
2097 rcu_read_unlock();
2098 continue;
2099 }
2100 rcu_read_unlock();
2101
2102 len = snprintf(buf, sizeof(buf),
2103 "Instance %pK: pid %d,%s completions %d/%d",
2104 instance, instance->pid,
2105 instance->connected ? " connected, " :
2106 "",
2107 instance->completion_insert -
2108 instance->completion_remove,
2109 MAX_COMPLETIONS);
2110 err = vchiq_dump(dump_context, buf, len + 1);
2111 if (err)
2112 return err;
2113 instance->mark = 1;
2114 }
2115 return 0;
2116 }
2117
2118 /****************************************************************************
2119 *
2120 * vchiq_dump_platform_service_state
2121 *
2122 ***************************************************************************/
2123
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)2124 int vchiq_dump_platform_service_state(void *dump_context,
2125 struct vchiq_service *service)
2126 {
2127 struct user_service *user_service =
2128 (struct user_service *)service->base.userdata;
2129 char buf[80];
2130 int len;
2131
2132 len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
2133
2134 if ((service->base.callback == service_callback) &&
2135 user_service->is_vchi) {
2136 len += scnprintf(buf + len, sizeof(buf) - len,
2137 ", %d/%d messages",
2138 user_service->msg_insert - user_service->msg_remove,
2139 MSG_QUEUE_SIZE);
2140
2141 if (user_service->dequeue_pending)
2142 len += scnprintf(buf + len, sizeof(buf) - len,
2143 " (dequeue pending)");
2144 }
2145
2146 return vchiq_dump(dump_context, buf, len + 1);
2147 }
2148
2149 /****************************************************************************
2150 *
2151 * vchiq_read
2152 *
2153 ***************************************************************************/
2154
2155 static ssize_t
vchiq_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)2156 vchiq_read(struct file *file, char __user *buf,
2157 size_t count, loff_t *ppos)
2158 {
2159 struct dump_context context;
2160 int err;
2161
2162 context.buf = buf;
2163 context.actual = 0;
2164 context.space = count;
2165 context.offset = *ppos;
2166
2167 err = vchiq_dump_state(&context, &g_state);
2168 if (err)
2169 return err;
2170
2171 *ppos += context.actual;
2172
2173 return context.actual;
2174 }
2175
2176 struct vchiq_state *
vchiq_get_state(void)2177 vchiq_get_state(void)
2178 {
2179
2180 if (!g_state.remote)
2181 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2182 else if (g_state.remote->initialised != 1)
2183 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2184 __func__, g_state.remote->initialised);
2185
2186 return (g_state.remote &&
2187 (g_state.remote->initialised == 1)) ? &g_state : NULL;
2188 }
2189
/* File operations backing the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2201
2202 /*
2203 * Autosuspend related functionality
2204 */
2205
/*
 * Service callback for the keepalive ("KEEP") service.  No callbacks are
 * expected on this service, so any invocation is logged as an error.
 */
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user,
			       void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
			"%s callback reason %d", __func__, reason);
	return 0;
}
2216
2217 static int
vchiq_keepalive_thread_func(void * v)2218 vchiq_keepalive_thread_func(void *v)
2219 {
2220 struct vchiq_state *state = (struct vchiq_state *)v;
2221 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2222
2223 enum vchiq_status status;
2224 struct vchiq_instance *instance;
2225 unsigned int ka_handle;
2226
2227 struct vchiq_service_params_kernel params = {
2228 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2229 .callback = vchiq_keepalive_vchiq_callback,
2230 .version = KEEPALIVE_VER,
2231 .version_min = KEEPALIVE_VER_MIN
2232 };
2233
2234 status = vchiq_initialise(&instance);
2235 if (status != VCHIQ_SUCCESS) {
2236 vchiq_log_error(vchiq_susp_log_level,
2237 "%s vchiq_initialise failed %d", __func__, status);
2238 goto exit;
2239 }
2240
2241 status = vchiq_connect(instance);
2242 if (status != VCHIQ_SUCCESS) {
2243 vchiq_log_error(vchiq_susp_log_level,
2244 "%s vchiq_connect failed %d", __func__, status);
2245 goto shutdown;
2246 }
2247
2248 status = vchiq_add_service(instance, ¶ms, &ka_handle);
2249 if (status != VCHIQ_SUCCESS) {
2250 vchiq_log_error(vchiq_susp_log_level,
2251 "%s vchiq_open_service failed %d", __func__, status);
2252 goto shutdown;
2253 }
2254
2255 while (1) {
2256 long rc = 0, uc = 0;
2257
2258 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
2259 vchiq_log_error(vchiq_susp_log_level,
2260 "%s interrupted", __func__);
2261 flush_signals(current);
2262 continue;
2263 }
2264
2265 /* read and clear counters. Do release_count then use_count to
2266 * prevent getting more releases than uses */
2267 rc = atomic_xchg(&arm_state->ka_release_count, 0);
2268 uc = atomic_xchg(&arm_state->ka_use_count, 0);
2269
2270 /* Call use/release service the requisite number of times.
2271 * Process use before release so use counts don't go negative */
2272 while (uc--) {
2273 atomic_inc(&arm_state->ka_use_ack_count);
2274 status = vchiq_use_service(ka_handle);
2275 if (status != VCHIQ_SUCCESS) {
2276 vchiq_log_error(vchiq_susp_log_level,
2277 "%s vchiq_use_service error %d",
2278 __func__, status);
2279 }
2280 }
2281 while (rc--) {
2282 status = vchiq_release_service(ka_handle);
2283 if (status != VCHIQ_SUCCESS) {
2284 vchiq_log_error(vchiq_susp_log_level,
2285 "%s vchiq_release_service error %d",
2286 __func__, status);
2287 }
2288 }
2289 }
2290
2291 shutdown:
2292 vchiq_shutdown(instance);
2293 exit:
2294 return 0;
2295 }
2296
2297 enum vchiq_status
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)2298 vchiq_arm_init_state(struct vchiq_state *state,
2299 struct vchiq_arm_state *arm_state)
2300 {
2301 if (arm_state) {
2302 rwlock_init(&arm_state->susp_res_lock);
2303
2304 init_completion(&arm_state->ka_evt);
2305 atomic_set(&arm_state->ka_use_count, 0);
2306 atomic_set(&arm_state->ka_use_ack_count, 0);
2307 atomic_set(&arm_state->ka_release_count, 0);
2308
2309 arm_state->state = state;
2310 arm_state->first_connect = 0;
2311
2312 }
2313 return VCHIQ_SUCCESS;
2314 }
2315
2316 enum vchiq_status
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)2317 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2318 enum USE_TYPE_E use_type)
2319 {
2320 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2321 enum vchiq_status ret = VCHIQ_SUCCESS;
2322 char entity[16];
2323 int *entity_uc;
2324 int local_uc;
2325
2326 if (!arm_state)
2327 goto out;
2328
2329 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2330
2331 if (use_type == USE_TYPE_VCHIQ) {
2332 sprintf(entity, "VCHIQ: ");
2333 entity_uc = &arm_state->peer_use_count;
2334 } else if (service) {
2335 sprintf(entity, "%c%c%c%c:%03d",
2336 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2337 service->client_id);
2338 entity_uc = &service->service_use_count;
2339 } else {
2340 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2341 "ptr", __func__);
2342 ret = VCHIQ_ERROR;
2343 goto out;
2344 }
2345
2346 write_lock_bh(&arm_state->susp_res_lock);
2347 local_uc = ++arm_state->videocore_use_count;
2348 ++(*entity_uc);
2349
2350 vchiq_log_trace(vchiq_susp_log_level,
2351 "%s %s count %d, state count %d",
2352 __func__, entity, *entity_uc, local_uc);
2353
2354 write_unlock_bh(&arm_state->susp_res_lock);
2355
2356 if (ret == VCHIQ_SUCCESS) {
2357 enum vchiq_status status = VCHIQ_SUCCESS;
2358 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2359
2360 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2361 /* Send the use notify to videocore */
2362 status = vchiq_send_remote_use_active(state);
2363 if (status == VCHIQ_SUCCESS)
2364 ack_cnt--;
2365 else
2366 atomic_add(ack_cnt,
2367 &arm_state->ka_use_ack_count);
2368 }
2369 }
2370
2371 out:
2372 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2373 return ret;
2374 }
2375
2376 enum vchiq_status
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)2377 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2378 {
2379 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2380 enum vchiq_status ret = VCHIQ_SUCCESS;
2381 char entity[16];
2382 int *entity_uc;
2383
2384 if (!arm_state)
2385 goto out;
2386
2387 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2388
2389 if (service) {
2390 sprintf(entity, "%c%c%c%c:%03d",
2391 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2392 service->client_id);
2393 entity_uc = &service->service_use_count;
2394 } else {
2395 sprintf(entity, "PEER: ");
2396 entity_uc = &arm_state->peer_use_count;
2397 }
2398
2399 write_lock_bh(&arm_state->susp_res_lock);
2400 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2401 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2402 WARN_ON(!arm_state->videocore_use_count);
2403 WARN_ON(!(*entity_uc));
2404 ret = VCHIQ_ERROR;
2405 goto unlock;
2406 }
2407 --arm_state->videocore_use_count;
2408 --(*entity_uc);
2409
2410 vchiq_log_trace(vchiq_susp_log_level,
2411 "%s %s count %d, state count %d",
2412 __func__, entity, *entity_uc,
2413 arm_state->videocore_use_count);
2414
2415 unlock:
2416 write_unlock_bh(&arm_state->susp_res_lock);
2417
2418 out:
2419 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2420 return ret;
2421 }
2422
2423 void
vchiq_on_remote_use(struct vchiq_state * state)2424 vchiq_on_remote_use(struct vchiq_state *state)
2425 {
2426 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2427
2428 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2429 atomic_inc(&arm_state->ka_use_count);
2430 complete(&arm_state->ka_evt);
2431 }
2432
2433 void
vchiq_on_remote_release(struct vchiq_state * state)2434 vchiq_on_remote_release(struct vchiq_state *state)
2435 {
2436 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2437
2438 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2439 atomic_inc(&arm_state->ka_release_count);
2440 complete(&arm_state->ka_evt);
2441 }
2442
2443 enum vchiq_status
vchiq_use_service_internal(struct vchiq_service * service)2444 vchiq_use_service_internal(struct vchiq_service *service)
2445 {
2446 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2447 }
2448
2449 enum vchiq_status
vchiq_release_service_internal(struct vchiq_service * service)2450 vchiq_release_service_internal(struct vchiq_service *service)
2451 {
2452 return vchiq_release_internal(service->state, service);
2453 }
2454
2455 struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance * instance)2456 vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
2457 {
2458 return &instance->debugfs_node;
2459 }
2460
2461 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)2462 vchiq_instance_get_use_count(struct vchiq_instance *instance)
2463 {
2464 struct vchiq_service *service;
2465 int use_count = 0, i;
2466
2467 i = 0;
2468 rcu_read_lock();
2469 while ((service = __next_service_by_instance(instance->state,
2470 instance, &i)))
2471 use_count += service->service_use_count;
2472 rcu_read_unlock();
2473 return use_count;
2474 }
2475
/* Accessor: pid of the process that owns @instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
2481
/* Accessor: current trace flag of @instance. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
2487
2488 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)2489 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
2490 {
2491 struct vchiq_service *service;
2492 int i;
2493
2494 i = 0;
2495 rcu_read_lock();
2496 while ((service = __next_service_by_instance(instance->state,
2497 instance, &i)))
2498 service->trace = trace;
2499 rcu_read_unlock();
2500 instance->trace = (trace != 0);
2501 }
2502
2503 enum vchiq_status
vchiq_use_service(unsigned int handle)2504 vchiq_use_service(unsigned int handle)
2505 {
2506 enum vchiq_status ret = VCHIQ_ERROR;
2507 struct vchiq_service *service = find_service_by_handle(handle);
2508
2509 if (service) {
2510 ret = vchiq_use_internal(service->state, service,
2511 USE_TYPE_SERVICE);
2512 unlock_service(service);
2513 }
2514 return ret;
2515 }
2516 EXPORT_SYMBOL(vchiq_use_service);
2517
2518 enum vchiq_status
vchiq_release_service(unsigned int handle)2519 vchiq_release_service(unsigned int handle)
2520 {
2521 enum vchiq_status ret = VCHIQ_ERROR;
2522 struct vchiq_service *service = find_service_by_handle(handle);
2523
2524 if (service) {
2525 ret = vchiq_release_internal(service->state, service);
2526 unlock_service(service);
2527 }
2528 return ret;
2529 }
2530 EXPORT_SYMBOL(vchiq_release_service);
2531
/* Snapshot of one service's identity and use count, taken under lock so
 * it can be logged afterwards without holding any locks. */
struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};
2537
/*
 * Log the use counts of up to MAX_SERVICES services, plus the peer and
 * overall VideoCore counts.  Snapshots are taken under susp_res_lock and
 * rcu_read_lock, then logged after both are dropped so the (potentially
 * slow) logging happens lock-free.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/* If there's more than 64 services, only dump ones with
	 * non-zero counts */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		/* Copy out what we need; service_ptr is only valid under RCU. */
		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d). Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
2613
2614 enum vchiq_status
vchiq_check_service(struct vchiq_service * service)2615 vchiq_check_service(struct vchiq_service *service)
2616 {
2617 struct vchiq_arm_state *arm_state;
2618 enum vchiq_status ret = VCHIQ_ERROR;
2619
2620 if (!service || !service->state)
2621 goto out;
2622
2623 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2624
2625 arm_state = vchiq_platform_get_arm_state(service->state);
2626
2627 read_lock_bh(&arm_state->susp_res_lock);
2628 if (service->service_use_count)
2629 ret = VCHIQ_SUCCESS;
2630 read_unlock_bh(&arm_state->susp_res_lock);
2631
2632 if (ret == VCHIQ_ERROR) {
2633 vchiq_log_error(vchiq_susp_log_level,
2634 "%s ERROR - %c%c%c%c:%d service count %d, "
2635 "state count %d", __func__,
2636 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2637 service->client_id, service->service_use_count,
2638 arm_state->videocore_use_count);
2639 vchiq_dump_service_use_state(service->state);
2640 }
2641 out:
2642 return ret;
2643 }
2644
vchiq_platform_conn_state_changed(struct vchiq_state * state,enum vchiq_connstate oldstate,enum vchiq_connstate newstate)2645 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
2646 enum vchiq_connstate oldstate,
2647 enum vchiq_connstate newstate)
2648 {
2649 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2650 char threadname[16];
2651
2652 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2653 get_conn_state_name(oldstate), get_conn_state_name(newstate));
2654 if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
2655 return;
2656
2657 write_lock_bh(&arm_state->susp_res_lock);
2658 if (arm_state->first_connect) {
2659 write_unlock_bh(&arm_state->susp_res_lock);
2660 return;
2661 }
2662
2663 arm_state->first_connect = 1;
2664 write_unlock_bh(&arm_state->susp_res_lock);
2665 snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
2666 state->id);
2667 arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
2668 (void *)state,
2669 threadname);
2670 if (IS_ERR(arm_state->ka_thread)) {
2671 vchiq_log_error(vchiq_susp_log_level,
2672 "vchiq: FATAL: couldn't create thread %s",
2673 threadname);
2674 } else {
2675 wake_up_process(arm_state->ka_thread);
2676 }
2677 }
2678
/* Device-tree match table: selects the per-SoC drvdata for probe(). */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2685
2686 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)2687 vchiq_register_child(struct platform_device *pdev, const char *name)
2688 {
2689 struct platform_device_info pdevinfo;
2690 struct platform_device *child;
2691
2692 memset(&pdevinfo, 0, sizeof(pdevinfo));
2693
2694 pdevinfo.parent = &pdev->dev;
2695 pdevinfo.name = name;
2696 pdevinfo.id = PLATFORM_DEVID_NONE;
2697 pdevinfo.dma_mask = DMA_BIT_MASK(32);
2698
2699 child = platform_device_register_full(&pdevinfo);
2700 if (IS_ERR(child)) {
2701 dev_warn(&pdev->dev, "%s not registered\n", name);
2702 child = NULL;
2703 }
2704
2705 return child;
2706 }
2707
/*
 * Platform probe: look up the matching per-SoC drvdata, acquire the
 * firmware interface (deferring if it is not ready), initialise the
 * vchiq platform state, and expose the /dev/vchiq character device plus
 * the camera/audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	struct device *vchiq_dev;
	int err;

	/* NOTE(review): of_match_node() can return NULL; of_id is
	 * dereferenced unchecked -- confirm this driver can only bind via
	 * an OF match. */
	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* Defer until the firmware driver has registered itself. */
	drvdata->fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	cdev_init(&vchiq_cdev, &vchiq_fops);
	vchiq_cdev.owner = THIS_MODULE;
	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
	if (err) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to register device");
		/* NOTE(review): vchiq_platform_init() is not unwound on this
		 * path -- verify no teardown is required. */
		goto failed_platform_init;
	}

	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
				  "vchiq");
	if (IS_ERR(vchiq_dev)) {
		err = PTR_ERR(vchiq_dev);
		goto failed_device_create;
	}

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d), device %d.%d",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
		MAJOR(vchiq_devid), MINOR(vchiq_devid));

	/* Child devices are best-effort: failures log a warning only. */
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_device_create:
	cdev_del(&vchiq_cdev);
failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2773
/* Platform remove: tear down in reverse order of vchiq_probe(). */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	cdev_del(&vchiq_cdev);

	return 0;
}
2784
/* Platform driver glue, bound via the OF match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
2793
vchiq_driver_init(void)2794 static int __init vchiq_driver_init(void)
2795 {
2796 int ret;
2797
2798 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2799 if (IS_ERR(vchiq_class)) {
2800 pr_err("Failed to create vchiq class\n");
2801 return PTR_ERR(vchiq_class);
2802 }
2803
2804 ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
2805 if (ret) {
2806 pr_err("Failed to allocate vchiq's chrdev region\n");
2807 goto class_destroy;
2808 }
2809
2810 ret = platform_driver_register(&vchiq_driver);
2811 if (ret) {
2812 pr_err("Failed to register vchiq driver\n");
2813 goto region_unregister;
2814 }
2815
2816 return 0;
2817
2818 region_unregister:
2819 unregister_chrdev_region(vchiq_devid, 1);
2820
2821 class_destroy:
2822 class_destroy(vchiq_class);
2823
2824 return ret;
2825 }
2826 module_init(vchiq_driver_init);
2827
/* Module exit: undo vchiq_driver_init() in reverse order. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
2835
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
2839