• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
43 
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
45 
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
48 
49 #define BELL0	0x00
50 #define BELL2	0x08
51 
52 #define ARM_DS_ACTIVE	BIT(2)
53 
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
57 
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
60 
61 /* Run time control of log level, based on KERN_XXX level. */
62 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
63 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
64 
65 DEFINE_SPINLOCK(msg_queue_spinlock);
66 struct vchiq_state g_state;
67 
68 static struct platform_device *bcm2835_camera;
69 static struct platform_device *bcm2835_audio;
70 
/* Per-SoC configuration, selected through the OF match data */
struct vchiq_drvdata {
	/* VPU L2 cache line size; pagelist offsets/sizes must align to it */
	const unsigned int cache_line_size;
	/* Firmware mailbox handle, used to send RPI_FIRMWARE_VCHIQ_INIT */
	struct rpi_firmware *fw;
};

/* BCM2835 (original Pi): 32-byte VPU cache lines */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

/* BCM2836/7: 64-byte VPU cache lines */
static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
83 
/* ARM-side per-state bookkeeping, embedded in struct vchiq_2835_state */
struct vchiq_arm_state {
	/*
	 * Keepalive-related data.
	 * NOTE(review): the counters appear to track use/release requests
	 * serviced by ka_thread and signalled via ka_evt — confirm against
	 * the keepalive thread body elsewhere in this file.
	 */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	/*
	 * NOTE(review): presumably serialises the suspend/resume use counts
	 * below — confirm against the lock's users.
	 */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning core state (set in vchiq_arm_init_state) */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};
117 
/* Platform-private state: a validity flag plus the embedded ARM state */
struct vchiq_2835_state {
	/* Non-zero once vchiq_platform_init_state() has run */
	int inited;
	struct vchiq_arm_state arm_state;
};

/*
 * Bookkeeping for one mapped pagelist.  The pagelist, the page pointer
 * array, the scatterlist and this structure itself are all carved out
 * of a single coherent DMA allocation (see create_pagelist()).
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;		/* the region shared with the VPU */
	size_t pagelist_buffer_size;		/* size of the whole allocation */
	dma_addr_t dma_addr;			/* bus address of the allocation */
	enum dma_data_direction dma_dir;	/* TO_DEVICE (write) / FROM_DEVICE (read) */
	unsigned int num_pages;
	unsigned int pages_need_release;	/* set only for pinned user pages */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* dma_map_sg() currently active */
};
134 
/* Doorbell register block, mapped in vchiq_platform_init() */
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
/* Always 2 * g_cache_line_size: one head + one tail fragment per buffer */
static unsigned int g_fragments_size;
/* Base of the fragment pool, carved from the coherent slot allocation */
static char *g_fragments_base;
/* Head of the free list threaded through the free fragments themselves */
static char *g_free_fragments;
/* Counts free fragments; create_pagelist() sleeps here when empty */
static struct semaphore g_free_fragments_sema;

/* Binary semaphore protecting the g_free_fragments list head */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
153 
154 static enum vchiq_status
155 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
156 			     unsigned int size, enum vchiq_bulk_dir dir);
157 
158 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)159 vchiq_doorbell_irq(int irq, void *dev_id)
160 {
161 	struct vchiq_state *state = dev_id;
162 	irqreturn_t ret = IRQ_NONE;
163 	unsigned int status;
164 
165 	/* Read (and clear) the doorbell */
166 	status = readl(g_regs + BELL0);
167 
168 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
169 		remote_event_pollall(state);
170 		ret = IRQ_HANDLED;
171 	}
172 
173 	return ret;
174 }
175 
/*
 * Undo whatever create_pagelist() managed to set up: unmap the
 * scatterlist (if mapped), unpin any user pages, and free the single
 * coherent allocation that holds the pagelist and its bookkeeping.
 */
static void
cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
	/* The scatterlist must be unmapped before the pages are released */
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	/* Only pinned user pages are released; vmalloc pages are not */
	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	/* Frees pagelist, pages[], scatterlist and pagelistinfo in one go */
	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
190 
191 static inline bool
is_adjacent_block(u32 * addrs,u32 addr,unsigned int k)192 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
193 {
194 	u32 tmp;
195 
196 	if (!k)
197 		return false;
198 
199 	tmp = (addrs[k - 1] & PAGE_MASK) +
200 	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
201 
202 	return tmp == (addr & PAGE_MASK);
203 }
204 
205 /* There is a potential problem with partial cache lines (pages?)
206  * at the ends of the block when reading. If the CPU accessed anything in
207  * the same line (page?) then it may have pulled old data into the cache,
208  * obscuring the new data underneath. We can solve this by transferring the
209  * partial cache lines separately, and allowing the ARM to copy into the
210  * cached area.
211  */
212 
/*
 * Build a pagelist describing @count bytes starting at kernel buffer
 * @buf or user buffer @ubuf (exactly one is non-NULL).  The pagelist,
 * the page pointer array, the scatterlist and the tracking structure
 * are all carved out of one coherent DMA allocation, which is mapped
 * for DMA before returning.  For reads whose head or tail is not
 * cache-line aligned, a fragment buffer is reserved and its index is
 * encoded into the pagelist type.  Returns NULL on any failure, with
 * all partial state cleaned up.
 */
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	/* count is stored in pagelist->length; keep it well inside int range */
	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Guard the pagelist_size computation below against overflow */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four consecutive regions */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel (vmalloc) buffer: look up each backing page */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages (writable when the VPU writes) */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		unsigned int len = PAGE_SIZE - offset;

		/* Only the first entry carries the sub-page offset */
		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* May sleep until a fragment buffer becomes free */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		/* Pop one buffer off the fragment free list */
		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
397 
/*
 * Release a pagelist once its bulk transfer has completed.  For reads
 * this also copies any head/tail partial-cache-line fragments back
 * into the caller's pages and returns the fragment buffer to the free
 * pool.  @actual is the number of bytes actually transferred (may be
 * negative on error, in which case no fragment data is copied).
 */
static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
		/* The fragment index was encoded into the type by create_pagelist() */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		/* Bytes before the first cache-line boundary */
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		/* Bytes past the last cache-line boundary */
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				pagelist->offset,
				fragments,
				head_bytes);
		}
		/* Tail fragment lives right after the head in the buffer */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				(pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				fragments + g_cache_line_size,
				tail_bytes);

		/* Return the fragment buffer to the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}
464 
/*
 * One-time platform bring-up: allocate the shared slot memory and the
 * fragment pool in coherent DMA memory, initialise the core state,
 * wire up the doorbell IRQ and hand the slot base address to the VPU
 * firmware.  Returns 0 on success or a negative errno.
 */
static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	/* Per-SoC cache line size; fragments hold one head + one tail line */
	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the VPU where the fragment pool lives (just after the slots) */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	/* Thread a singly-linked free list through the fragments themselves */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* A non-zero response means the firmware rejected the address */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
556 
557 static void
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)558 vchiq_arm_init_state(struct vchiq_state *state,
559 		     struct vchiq_arm_state *arm_state)
560 {
561 	if (arm_state) {
562 		rwlock_init(&arm_state->susp_res_lock);
563 
564 		init_completion(&arm_state->ka_evt);
565 		atomic_set(&arm_state->ka_use_count, 0);
566 		atomic_set(&arm_state->ka_use_ack_count, 0);
567 		atomic_set(&arm_state->ka_release_count, 0);
568 
569 		arm_state->state = state;
570 		arm_state->first_connect = 0;
571 	}
572 }
573 
/*
 * Allocate the 2835 platform-private state and attach it to @state.
 * Returns 0 on success or -ENOMEM.
 */
int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!state->platform_state)
		return -ENOMEM;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	/* Mark valid before the arm-state init so getters can sanity-check */
	platform_state->inited = 1;
	vchiq_arm_init_state(state, &platform_state->arm_state);

	return 0;
}
590 
vchiq_platform_get_arm_state(struct vchiq_state * state)591 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
592 {
593 	struct vchiq_2835_state *platform_state;
594 
595 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
596 
597 	WARN_ON_ONCE(!platform_state->inited);
598 
599 	return &platform_state->arm_state;
600 }
601 
/*
 * Signal the VideoCore side that a remote event has fired, ringing the
 * VPU doorbell (BELL2) if the peer has armed the event.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	/* Make the fired flag visible before the doorbell write below */
	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
618 
619 int
vchiq_prepare_bulk_data(struct vchiq_instance * instance,struct vchiq_bulk * bulk,void * offset,void __user * uoffset,int size,int dir)620 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
621 			void __user *uoffset, int size, int dir)
622 {
623 	struct vchiq_pagelist_info *pagelistinfo;
624 
625 	pagelistinfo = create_pagelist(instance, offset, uoffset, size,
626 				       (dir == VCHIQ_BULK_RECEIVE)
627 				       ? PAGELIST_READ
628 				       : PAGELIST_WRITE);
629 
630 	if (!pagelistinfo)
631 		return -ENOMEM;
632 
633 	bulk->data = pagelistinfo->dma_addr;
634 
635 	/*
636 	 * Store the pagelistinfo address in remote_data,
637 	 * which isn't used by the slave.
638 	 */
639 	bulk->remote_data = pagelistinfo;
640 
641 	return 0;
642 }
643 
644 void
vchiq_complete_bulk(struct vchiq_instance * instance,struct vchiq_bulk * bulk)645 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
646 {
647 	if (bulk && bulk->remote_data && bulk->actual)
648 		free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
649 			      bulk->actual);
650 }
651 
/*
 * Emit a one-line platform description into the dump context.
 */
int vchiq_dump_platform_state(void *dump_context)
{
	static const char info[] = "  Platform: 2835 (VC master)";
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "%s", info);
	/* Include the terminating NUL in the dumped length */
	return vchiq_dump(dump_context, buf, len + 1);
}
660 
#define VCHIQ_INIT_RETRIES 10
/*
 * Create a vchiq instance bound to the global state.  Waits briefly
 * for the VideoCore connection to come up, since callers may run early
 * in boot.  On success stores the new instance in *instance_out and
 * returns 0; otherwise returns -ENOTCONN or -ENOMEM.
 */
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched,so don't
	 * block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n", __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

	/* The success path deliberately falls through: the label only traces */
failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
711 
free_bulk_waiter(struct vchiq_instance * instance)712 void free_bulk_waiter(struct vchiq_instance *instance)
713 {
714 	struct bulk_waiter_node *waiter, *next;
715 
716 	list_for_each_entry_safe(waiter, next,
717 				 &instance->bulk_waiter_list, list) {
718 		list_del(&waiter->list);
719 		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
720 			       waiter, waiter->pid);
721 		kfree(waiter);
722 	}
723 }
724 
/*
 * Tear down an instance: remove all of its services, free any parked
 * bulk waiters and release the instance itself.  Returns VCHIQ_RETRY
 * if taking the state mutex was interrupted by a fatal signal.
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	/* Instance memory is only freed after the state mutex is dropped */
	free_bulk_waiter(instance);
	kfree(instance);

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
746 
/* Report whether this instance has completed a vchiq connect. */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
751 
/*
 * Complete the vchiq connection handshake for this instance and mark
 * it connected on success.  Returns VCHIQ_RETRY if taking the state
 * mutex was interrupted by a fatal signal.
 */
enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	/* Only a successful handshake flips the connected flag */
	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);
775 
776 static enum vchiq_status
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)777 vchiq_add_service(struct vchiq_instance *instance,
778 		  const struct vchiq_service_params_kernel *params,
779 		  unsigned int *phandle)
780 {
781 	enum vchiq_status status;
782 	struct vchiq_state *state = instance->state;
783 	struct vchiq_service *service = NULL;
784 	int srvstate;
785 
786 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
787 
788 	srvstate = vchiq_is_connected(instance)
789 		? VCHIQ_SRVSTATE_LISTENING
790 		: VCHIQ_SRVSTATE_HIDDEN;
791 
792 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
793 
794 	if (service) {
795 		*phandle = service->handle;
796 		status = VCHIQ_SUCCESS;
797 	} else {
798 		status = VCHIQ_ERROR;
799 	}
800 
801 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
802 
803 	return status;
804 }
805 
/*
 * Add a service in the OPENING state and perform the open handshake
 * with the peer.  Only valid on a connected instance.  On success
 * *phandle receives the service handle; on failure it is left as
 * VCHIQ_SERVICE_HANDLE_INVALID and any added service is removed again.
 */
enum vchiq_status
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	enum vchiq_status   status = VCHIQ_ERROR;
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		/* Handshake failed - undo the add and invalidate the handle */
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(instance, service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
837 
838 enum vchiq_status
vchiq_bulk_transmit(struct vchiq_instance * instance,unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)839 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
840 		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
841 {
842 	enum vchiq_status status;
843 
844 	while (1) {
845 		switch (mode) {
846 		case VCHIQ_BULK_MODE_NOCALLBACK:
847 		case VCHIQ_BULK_MODE_CALLBACK:
848 			status = vchiq_bulk_transfer(instance, handle,
849 						     (void *)data, NULL,
850 						     size, userdata, mode,
851 						     VCHIQ_BULK_TRANSMIT);
852 			break;
853 		case VCHIQ_BULK_MODE_BLOCKING:
854 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
855 							      VCHIQ_BULK_TRANSMIT);
856 			break;
857 		default:
858 			return VCHIQ_ERROR;
859 		}
860 
861 		/*
862 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
863 		 * to implement a retry mechanism since this function is
864 		 * supposed to block until queued
865 		 */
866 		if (status != VCHIQ_RETRY)
867 			break;
868 
869 		msleep(1);
870 	}
871 
872 	return status;
873 }
874 EXPORT_SYMBOL(vchiq_bulk_transmit);
875 
vchiq_bulk_receive(struct vchiq_instance * instance,unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)876 enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
877 				     void *data, unsigned int size, void *userdata,
878 				     enum vchiq_bulk_mode mode)
879 {
880 	enum vchiq_status status;
881 
882 	while (1) {
883 		switch (mode) {
884 		case VCHIQ_BULK_MODE_NOCALLBACK:
885 		case VCHIQ_BULK_MODE_CALLBACK:
886 			status = vchiq_bulk_transfer(instance, handle, data, NULL,
887 						     size, userdata,
888 						     mode, VCHIQ_BULK_RECEIVE);
889 			break;
890 		case VCHIQ_BULK_MODE_BLOCKING:
891 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
892 							      VCHIQ_BULK_RECEIVE);
893 			break;
894 		default:
895 			return VCHIQ_ERROR;
896 		}
897 
898 		/*
899 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
900 		 * to implement a retry mechanism since this function is
901 		 * supposed to block until queued
902 		 */
903 		if (status != VCHIQ_RETRY)
904 			break;
905 
906 		msleep(1);
907 	}
908 
909 	return status;
910 }
911 EXPORT_SYMBOL(vchiq_bulk_receive);
912 
/*
 * Perform a bulk transfer in blocking mode.  Each thread keeps at most
 * one bulk_waiter parked on the instance's list, so a transfer
 * interrupted with VCHIQ_RETRY can be resumed by the same pid on its
 * next call rather than being queued twice.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return VCHIQ_ERROR;

	/* Only needed to validate the handle - drop the reference now */
	vchiq_service_put(service);

	/* Look for a waiter left behind by a previous, interrupted call */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	/* Finished (or fatally interrupted): the waiter can be freed */
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted - park the waiter so this pid can resume later */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
985 
/*
 * Queue a completion record for delivery to the user-side client.
 *
 * Completions live in instance->completions, a power-of-two ring indexed
 * by free-running insert/remove counters.  If the ring is full, block
 * (interruptibly) until the client consumes an entry via AWAIT_COMPLETION
 * (signalled through instance->remove_event).
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_RETRY if the wait was interrupted by a
 * signal.  A close of the instance while waiting also yields VCHIQ_SUCCESS
 * (the completion is simply dropped).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking wraps the index */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
1046 
/*
 * Callback invoked by the VCHIQ core for events on a userspace-owned
 * service.  For "vchi" services, message headers are queued into the
 * per-service msg_queue; everything else (and an extra MESSAGE_AVAILABLE
 * marker when needed) is routed through the per-instance completion ring
 * via add_completion().
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted while waiting for
 * queue space, or VCHIQ_ERROR if the instance starts closing mid-wait.
 */
enum vchiq_status
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	user_service = (struct user_service *)service->base.userdata;

	/*
	 * NOTE(review): 'instance' was already dereferenced above via
	 * handle_to_service(), so the '!instance' test here cannot catch a
	 * NULL pointer in time - confirm whether it can be dropped or should
	 * be hoisted before the first use.
	 */
	if (!instance || instance->closing) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Block (dropping the lock) while the message queue is full */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		/* MSG_QUEUE_SIZE is a power of two; masking wraps the index */
		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header now belongs to the msg_queue, not the completion */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1166 
/*
 * Copy one chunk of dump text to the userspace buffer described by
 * 'dump_context' (struct dump_context: user buf, space, actual, offset).
 * 'len' is the number of bytes of 'str' to emit; when the terminating NUL
 * is included in 'len' it marks a line end and is rewritten as '\n'.
 *
 * Returns 0 on success (including when output is skipped or truncated),
 * or -EFAULT if the copy to userspace faults.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* User buffer already full - silently drop further output */
	if (context->actual >= context->space)
		return 0;

	/* Honour a caller-requested read offset by skipping leading bytes */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
1207 
vchiq_dump_platform_instances(void * dump_context)1208 int vchiq_dump_platform_instances(void *dump_context)
1209 {
1210 	struct vchiq_state *state = vchiq_get_state();
1211 	char buf[80];
1212 	int len;
1213 	int i;
1214 
1215 	if (!state)
1216 		return -ENOTCONN;
1217 
1218 	/*
1219 	 * There is no list of instances, so instead scan all services,
1220 	 * marking those that have been dumped.
1221 	 */
1222 
1223 	rcu_read_lock();
1224 	for (i = 0; i < state->unused_service; i++) {
1225 		struct vchiq_service *service;
1226 		struct vchiq_instance *instance;
1227 
1228 		service = rcu_dereference(state->services[i]);
1229 		if (!service || service->base.callback != service_callback)
1230 			continue;
1231 
1232 		instance = service->instance;
1233 		if (instance)
1234 			instance->mark = 0;
1235 	}
1236 	rcu_read_unlock();
1237 
1238 	for (i = 0; i < state->unused_service; i++) {
1239 		struct vchiq_service *service;
1240 		struct vchiq_instance *instance;
1241 		int err;
1242 
1243 		rcu_read_lock();
1244 		service = rcu_dereference(state->services[i]);
1245 		if (!service || service->base.callback != service_callback) {
1246 			rcu_read_unlock();
1247 			continue;
1248 		}
1249 
1250 		instance = service->instance;
1251 		if (!instance || instance->mark) {
1252 			rcu_read_unlock();
1253 			continue;
1254 		}
1255 		rcu_read_unlock();
1256 
1257 		len = snprintf(buf, sizeof(buf),
1258 			       "Instance %pK: pid %d,%s completions %d/%d",
1259 			       instance, instance->pid,
1260 			       instance->connected ? " connected, " :
1261 			       "",
1262 			       instance->completion_insert -
1263 			       instance->completion_remove,
1264 			       MAX_COMPLETIONS);
1265 		err = vchiq_dump(dump_context, buf, len + 1);
1266 		if (err)
1267 			return err;
1268 		instance->mark = 1;
1269 	}
1270 	return 0;
1271 }
1272 
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)1273 int vchiq_dump_platform_service_state(void *dump_context,
1274 				      struct vchiq_service *service)
1275 {
1276 	struct user_service *user_service =
1277 			(struct user_service *)service->base.userdata;
1278 	char buf[80];
1279 	int len;
1280 
1281 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1282 
1283 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
1284 		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1285 				 user_service->msg_insert - user_service->msg_remove,
1286 				 MSG_QUEUE_SIZE);
1287 
1288 		if (user_service->dequeue_pending)
1289 			len += scnprintf(buf + len, sizeof(buf) - len,
1290 				" (dequeue pending)");
1291 	}
1292 
1293 	return vchiq_dump(dump_context, buf, len + 1);
1294 }
1295 
1296 struct vchiq_state *
vchiq_get_state(void)1297 vchiq_get_state(void)
1298 {
1299 	if (!g_state.remote) {
1300 		pr_err("%s: g_state.remote == NULL\n", __func__);
1301 		return NULL;
1302 	}
1303 
1304 	if (g_state.remote->initialised != 1) {
1305 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1306 			  __func__, g_state.remote->initialised);
1307 		return NULL;
1308 	}
1309 
1310 	return &g_state;
1311 }
1312 
1313 /*
1314  * Autosuspend related functionality
1315  */
1316 
/*
 * Callback for the keepalive "KEEP" service.  No events are expected on
 * this service, so any callback is logged as an error and otherwise
 * ignored.
 */
static enum vchiq_status
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
	return 0;
}
1326 
/*
 * Keepalive thread.  Owns a private VCHIQ instance with a "KEEP" service
 * and mirrors remote use/release requests (counted into ka_use_count /
 * ka_release_count by vchiq_on_remote_use()/vchiq_on_remote_release(),
 * which also signal ka_evt) into vchiq_use_service() /
 * vchiq_release_service() calls.  Loops forever once set up; only exits
 * on an initialisation failure.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* A signal is not a shutdown request - flush and keep going */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(instance, ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(instance, ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1409 
1410 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)1411 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1412 		   enum USE_TYPE_E use_type)
1413 {
1414 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1415 	int ret = 0;
1416 	char entity[16];
1417 	int *entity_uc;
1418 	int local_uc;
1419 
1420 	if (!arm_state) {
1421 		ret = -EINVAL;
1422 		goto out;
1423 	}
1424 
1425 	if (use_type == USE_TYPE_VCHIQ) {
1426 		sprintf(entity, "VCHIQ:   ");
1427 		entity_uc = &arm_state->peer_use_count;
1428 	} else if (service) {
1429 		sprintf(entity, "%c%c%c%c:%03d",
1430 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1431 			service->client_id);
1432 		entity_uc = &service->service_use_count;
1433 	} else {
1434 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1435 		ret = -EINVAL;
1436 		goto out;
1437 	}
1438 
1439 	write_lock_bh(&arm_state->susp_res_lock);
1440 	local_uc = ++arm_state->videocore_use_count;
1441 	++(*entity_uc);
1442 
1443 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1444 			*entity_uc, local_uc);
1445 
1446 	write_unlock_bh(&arm_state->susp_res_lock);
1447 
1448 	if (!ret) {
1449 		enum vchiq_status status = VCHIQ_SUCCESS;
1450 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1451 
1452 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
1453 			/* Send the use notify to videocore */
1454 			status = vchiq_send_remote_use_active(state);
1455 			if (status == VCHIQ_SUCCESS)
1456 				ack_cnt--;
1457 			else
1458 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1459 		}
1460 	}
1461 
1462 out:
1463 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1464 	return ret;
1465 }
1466 
1467 int
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)1468 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1469 {
1470 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1471 	int ret = 0;
1472 	char entity[16];
1473 	int *entity_uc;
1474 
1475 	if (!arm_state) {
1476 		ret = -EINVAL;
1477 		goto out;
1478 	}
1479 
1480 	if (service) {
1481 		sprintf(entity, "%c%c%c%c:%03d",
1482 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1483 			service->client_id);
1484 		entity_uc = &service->service_use_count;
1485 	} else {
1486 		sprintf(entity, "PEER:   ");
1487 		entity_uc = &arm_state->peer_use_count;
1488 	}
1489 
1490 	write_lock_bh(&arm_state->susp_res_lock);
1491 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1492 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
1493 		WARN_ON(!arm_state->videocore_use_count);
1494 		WARN_ON(!(*entity_uc));
1495 		ret = -EINVAL;
1496 		goto unlock;
1497 	}
1498 	--arm_state->videocore_use_count;
1499 	--(*entity_uc);
1500 
1501 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1502 			*entity_uc, arm_state->videocore_use_count);
1503 
1504 unlock:
1505 	write_unlock_bh(&arm_state->susp_res_lock);
1506 
1507 out:
1508 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1509 	return ret;
1510 }
1511 
1512 void
vchiq_on_remote_use(struct vchiq_state * state)1513 vchiq_on_remote_use(struct vchiq_state *state)
1514 {
1515 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1516 
1517 	atomic_inc(&arm_state->ka_use_count);
1518 	complete(&arm_state->ka_evt);
1519 }
1520 
1521 void
vchiq_on_remote_release(struct vchiq_state * state)1522 vchiq_on_remote_release(struct vchiq_state *state)
1523 {
1524 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1525 
1526 	atomic_inc(&arm_state->ka_release_count);
1527 	complete(&arm_state->ka_evt);
1528 }
1529 
/* Take a use-count on behalf of a service.  Returns 0 or negative errno. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
1535 
/* Drop a use-count on behalf of a service.  Returns 0 or negative errno. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
1541 
/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1547 
1548 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)1549 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1550 {
1551 	struct vchiq_service *service;
1552 	int use_count = 0, i;
1553 
1554 	i = 0;
1555 	rcu_read_lock();
1556 	while ((service = __next_service_by_instance(instance->state,
1557 						     instance, &i)))
1558 		use_count += service->service_use_count;
1559 	rcu_read_unlock();
1560 	return use_count;
1561 }
1562 
/* Accessor for the pid recorded for this instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1568 
/* Accessor for the instance's trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1574 
1575 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)1576 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1577 {
1578 	struct vchiq_service *service;
1579 	int i;
1580 
1581 	i = 0;
1582 	rcu_read_lock();
1583 	while ((service = __next_service_by_instance(instance->state,
1584 						     instance, &i)))
1585 		service->trace = trace;
1586 	rcu_read_unlock();
1587 	instance->trace = (trace != 0);
1588 }
1589 
1590 enum vchiq_status
vchiq_use_service(struct vchiq_instance * instance,unsigned int handle)1591 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1592 {
1593 	enum vchiq_status ret = VCHIQ_ERROR;
1594 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1595 
1596 	if (service) {
1597 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1598 		vchiq_service_put(service);
1599 	}
1600 	return ret;
1601 }
1602 EXPORT_SYMBOL(vchiq_use_service);
1603 
1604 enum vchiq_status
vchiq_release_service(struct vchiq_instance * instance,unsigned int handle)1605 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1606 {
1607 	enum vchiq_status ret = VCHIQ_ERROR;
1608 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1609 
1610 	if (service) {
1611 		ret = vchiq_release_internal(service->state, service);
1612 		vchiq_service_put(service);
1613 	}
1614 	return ret;
1615 }
1616 EXPORT_SYMBOL(vchiq_release_service);
1617 
/* Snapshot of one service's identity and use count, taken under the
 * suspend/resume lock for later (lock-free) printing by
 * vchiq_dump_service_use_state().
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
1623 
/*
 * Log (at warning level) the use counts of all services, plus the peer
 * and overall VideoCore use counts.  Snapshots the data under
 * susp_res_lock + RCU first, then prints after the locks are dropped.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* Snapshot buffer - avoids printing while holding locks */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
1697 
1698 enum vchiq_status
vchiq_check_service(struct vchiq_service * service)1699 vchiq_check_service(struct vchiq_service *service)
1700 {
1701 	struct vchiq_arm_state *arm_state;
1702 	enum vchiq_status ret = VCHIQ_ERROR;
1703 
1704 	if (!service || !service->state)
1705 		goto out;
1706 
1707 	arm_state = vchiq_platform_get_arm_state(service->state);
1708 
1709 	read_lock_bh(&arm_state->susp_res_lock);
1710 	if (service->service_use_count)
1711 		ret = VCHIQ_SUCCESS;
1712 	read_unlock_bh(&arm_state->susp_res_lock);
1713 
1714 	if (ret == VCHIQ_ERROR) {
1715 		vchiq_log_error(vchiq_susp_log_level,
1716 				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1717 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1718 				service->service_use_count, arm_state->videocore_use_count);
1719 		vchiq_dump_service_use_state(service->state);
1720 	}
1721 out:
1722 	return ret;
1723 }
1724 
/*
 * Connection-state change hook.  On the first transition into the
 * CONNECTED state, start the keepalive thread (exactly once, guarded by
 * arm_state->first_connect under susp_res_lock).
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	/* Only create the keepalive thread on the first connect */
	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
1758 
/* Devicetree match table; .data selects the per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1765 
1766 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)1767 vchiq_register_child(struct platform_device *pdev, const char *name)
1768 {
1769 	struct platform_device_info pdevinfo;
1770 	struct platform_device *child;
1771 
1772 	memset(&pdevinfo, 0, sizeof(pdevinfo));
1773 
1774 	pdevinfo.parent = &pdev->dev;
1775 	pdevinfo.name = name;
1776 	pdevinfo.id = PLATFORM_DEVID_NONE;
1777 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
1778 
1779 	child = platform_device_register_full(&pdevinfo);
1780 	if (IS_ERR(child)) {
1781 		dev_warn(&pdev->dev, "%s not registered\n", name);
1782 		child = NULL;
1783 	}
1784 
1785 	return child;
1786 }
1787 
vchiq_probe(struct platform_device * pdev)1788 static int vchiq_probe(struct platform_device *pdev)
1789 {
1790 	struct device_node *fw_node;
1791 	const struct of_device_id *of_id;
1792 	struct vchiq_drvdata *drvdata;
1793 	int err;
1794 
1795 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1796 	drvdata = (struct vchiq_drvdata *)of_id->data;
1797 	if (!drvdata)
1798 		return -EINVAL;
1799 
1800 	fw_node = of_find_compatible_node(NULL, NULL,
1801 					  "raspberrypi,bcm2835-firmware");
1802 	if (!fw_node) {
1803 		dev_err(&pdev->dev, "Missing firmware node\n");
1804 		return -ENOENT;
1805 	}
1806 
1807 	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1808 	of_node_put(fw_node);
1809 	if (!drvdata->fw)
1810 		return -EPROBE_DEFER;
1811 
1812 	platform_set_drvdata(pdev, drvdata);
1813 
1814 	err = vchiq_platform_init(pdev, &g_state);
1815 	if (err)
1816 		goto failed_platform_init;
1817 
1818 	vchiq_debugfs_init();
1819 
1820 	vchiq_log_info(vchiq_arm_log_level,
1821 		       "vchiq: platform initialised - version %d (min %d)",
1822 		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1823 
1824 	/*
1825 	 * Simply exit on error since the function handles cleanup in
1826 	 * cases of failure.
1827 	 */
1828 	err = vchiq_register_chrdev(&pdev->dev);
1829 	if (err) {
1830 		vchiq_log_warning(vchiq_arm_log_level,
1831 				  "Failed to initialize vchiq cdev");
1832 		goto error_exit;
1833 	}
1834 
1835 	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
1836 	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
1837 
1838 	return 0;
1839 
1840 failed_platform_init:
1841 	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1842 error_exit:
1843 	return err;
1844 }
1845 
/* Platform driver remove: tear down children, debugfs and the chrdev. */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}
1855 
/* Platform driver glue, matched via the devicetree table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1864 
vchiq_driver_init(void)1865 static int __init vchiq_driver_init(void)
1866 {
1867 	int ret;
1868 
1869 	ret = platform_driver_register(&vchiq_driver);
1870 	if (ret)
1871 		pr_err("Failed to register vchiq driver\n");
1872 
1873 	return ret;
1874 }
1875 module_init(vchiq_driver_init);
1876 
/* Module exit: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1882 
1883 MODULE_LICENSE("Dual BSD/GPL");
1884 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1885 MODULE_AUTHOR("Broadcom Corporation");
1886