/*
 *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 *  This driver needs a DirectFB counterpart in user space, communication
 *  is handled via mmap()ed memory areas and an ioctl.
 *
 *  Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 *  Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 *  Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

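/*
 * Illustrative sketch only, not part of the driver: a minimal userspace
 * client submits batches of 32-bit commands with write() and synchronizes
 * with an ioctl. The device node name is an assumption derived from
 * DRV_NAME below, and "cmds"/"n_words" are placeholder names; the real
 * protocol is implemented by the DirectFB counterpart.
 *
 *	int fd = open("/dev/pxa3xx-gcu", O_RDWR);
 *
 *	write(fd, cmds, n_words * 4);		// queue one batch
 *	ioctl(fd, PXA3XX_GCU_IOCTL_WAIT_IDLE);	// block until the GCU is idle
 */
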
/*
 * WARNING: This controller is attached to System Bus 2 of the PXA which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you need to teach
 * your bootloader to do it for now.
 */

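/*
 * Illustrative sketch only: the bootloader would enable the System Bus 2
 * arbiter clock along the lines of
 *
 *	CKENB |= (1 << 9);
 *
 * before handing control to the kernel. Whether CKENB is available as a
 * writable register macro in your bootloader environment is an assumption;
 * check the PXA3xx developer manual for the actual register address.
 */
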
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"
#define MISCDEV_MINOR	197

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg)					\
	do {						\
		QPRINT(priv, KERN_DEBUG, msg);		\
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg)					\
	do {						\
		QPRINT(priv, KERN_ERR, msg);		\
	} while (0)

struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32			*ptr;
	dma_addr_t		 phys;
	unsigned long		 length;
};

struct pxa3xx_gcu_priv {
	void __iomem		 *mmio_base;
	struct clk		 *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t		  shared_phys;
	struct resource		 *resource_mem;
	struct miscdevice	  misc_dev;
	wait_queue_head_t	  wait_idle;
	wait_queue_head_t	  wait_free;
	spinlock_t		  spinlock;
	struct timeval		  base_time;

	struct pxa3xx_gcu_batch *free;

	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

#define QPRINT(priv, level, msg)					\
	do {								\
		struct timeval tv;					\
		struct pxa3xx_gcu_shared *shared = priv->shared;	\
		u32 base = gc_readl(priv, REG_GCRBBR);			\
									\
		do_gettimeofday(&tv);					\
									\
		printk(level "%ld.%03ld.%03ld - %-17s: %-21s (%s, "	\
			"STATUS "					\
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, "	\
			"T %5ld)\n",					\
			tv.tv_sec - priv->base_time.tv_sec,		\
			tv.tv_usec / 1000, tv.tv_usec % 1000,		\
			__func__, msg,					\
			shared->hw_running ? "running" : "   idle",	\
			gc_readl(priv, REG_GCISCR),			\
			gc_readl(priv, REG_GCRBBR),			\
			gc_readl(priv, REG_GCRBLR),			\
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBTR) - base) / 4);	\
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	do_gettimeofday(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch	*ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}

static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by the userspace.
			 * Set hw_running = 0 and wait for the next userspace
			 * kick-off */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret != 0)
			break;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
						       priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

static inline struct pxa3xx_gcu_priv *file_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;
	return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}

static ssize_t
pxa3xx_gcu_misc_write(struct file *file, const char *buff,
		      size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch	*buffer;
	struct pxa3xx_gcu_priv *priv = file_dev(file);

	int words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;

	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer = priv->free;
	priv->free = buffer->next;

	spin_unlock_irqrestore(&priv->spinlock, flags);


	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}


static long
pxa3xx_gcu_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = file_dev(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

static int
pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = file_dev(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(NULL, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_flags |= VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}


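/*
 * Illustrative sketch only (userspace side; "shared_size", "reg_size" and
 * the variable names are assumptions): the two mmap() windows handed out
 * above are selected by the file offset.
 *
 *	// shared data area lives at file offset 0
 *	shared = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED, fd, 0);
 *
 *	// the GCU register window follows at offset shared_size
 *	regs = mmap(NULL, reg_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, shared_size);
 */
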
#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;

static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
{
	struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;

	QERROR("Timer DUMP");

	/* init the timer structure */
	init_timer(&pxa3xx_gcu_debug_timer);
	pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
	pxa3xx_gcu_debug_timer.data = ptr;
	pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* five seconds */

	add_timer(&pxa3xx_gcu_debug_timer);
}

static void pxa3xx_gcu_init_debug_timer(void)
{
	pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(void) {}
#endif

static int
add_buffer(struct platform_device *dev,
	   struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;

	priv->free = buffer;

	return 0;
}

static void
free_buffers(struct platform_device *dev,
	     struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);

		buffer = next;
	}

	priv->free = NULL;
}

static const struct file_operations misc_fops = {
	.owner	= THIS_MODULE,
	.write	= pxa3xx_gcu_misc_write,
	.unlocked_ioctl = pxa3xx_gcu_misc_ioctl,
	.mmap	= pxa3xx_gcu_misc_mmap
};

static int pxa3xx_gcu_probe(struct platform_device *dev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;

	priv = kzalloc(sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	for (i = 0; i < 8; i++) {
		ret = add_buffer(dev, priv);
		if (ret) {
			dev_err(&dev->dev, "failed to allocate DMA memory\n");
			goto err_free_priv;
		}
	}

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_dev.minor	= MISCDEV_MINOR,
	priv->misc_dev.name	= DRV_NAME,
	priv->misc_dev.fops	= &misc_fops,

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(&dev->dev, "misc_register() for minor %d failed\n",
			MISCDEV_MINOR);
		goto err_free_priv;
	}

	/* handle IO resources */
	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		ret = -ENODEV;
		goto err_misc_deregister;
	}

	if (!request_mem_region(r->start, resource_size(r), dev->name)) {
		dev_err(&dev->dev, "failed to request I/O memory\n");
		ret = -EBUSY;
		goto err_misc_deregister;
	}

	priv->mmio_base = ioremap_nocache(r->start, resource_size(r));
	if (!priv->mmio_base) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		ret = -EBUSY;
		goto err_free_mem_region;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(&dev->dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);

	if (!priv->shared) {
		dev_err(&dev->dev, "failed to allocate DMA memory\n");
		ret = -ENOMEM;
		goto err_free_io;
	}

	/* enable the clock */
	priv->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&dev->dev, "failed to get clock\n");
		ret = -ENODEV;
		goto err_free_dma;
	}

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(&dev->dev, "failed to enable clock\n");
		goto err_put_clk;
	}

	/* request the IRQ */
	irq = platform_get_irq(dev, 0);
	if (irq < 0) {
		dev_err(&dev->dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err_put_clk;
	}

	ret = request_irq(irq, pxa3xx_gcu_handle_irq,
			  0, DRV_NAME, priv);
	if (ret) {
		dev_err(&dev->dev, "request_irq failed\n");
		ret = -EBUSY;
		goto err_put_clk;
	}

	platform_set_drvdata(dev, priv);
	priv->resource_mem = r;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer();

	dev_info(&dev->dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_put_clk:
	clk_disable(priv->clk);
	clk_put(priv->clk);

err_free_dma:
	dma_free_coherent(&dev->dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);

err_free_io:
	iounmap(priv->mmio_base);

err_free_mem_region:
	release_mem_region(r->start, resource_size(r));

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_free_priv:
	platform_set_drvdata(dev, NULL);
	free_buffers(dev, priv);
	kfree(priv);
	return ret;
}

static int pxa3xx_gcu_remove(struct platform_device *dev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(dev);
	struct resource *r = priv->resource_mem;

	pxa3xx_gcu_wait_idle(priv);

	misc_deregister(&priv->misc_dev);
	dma_free_coherent(&dev->dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);
	iounmap(priv->mmio_base);
	release_mem_region(r->start, resource_size(r));
	platform_set_drvdata(dev, NULL);
	clk_disable(priv->clk);
	free_buffers(dev, priv);
	kfree(priv);

	return 0;
}

static struct platform_driver pxa3xx_gcu_driver = {
	.probe	  = pxa3xx_gcu_probe,
	.remove	 = pxa3xx_gcu_remove,
	.driver	 = {
		.owner  = THIS_MODULE,
		.name   = DRV_NAME,
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
		"Denis Oliver Kropp <dok@directfb.org>, "
		"Daniel Mack <daniel@caiaq.de>");