/*
 * drivers/media/google_vp9
 * (C) Copyright 2010-2016
 * Reuuimlla Technology Co., Ltd. <www.allwinnertech.com>
 * yangcaoyuan<yangcaoyuan@allwinnertech.com>
 *
 * Char device driver exposing the Google VP9/AV1 decoder hardware
 * to user space.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/rmap.h>
#include <linux/wait.h>
#include <linux/semaphore.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/mm.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
#include <sunxi-clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "google_vp9.h"
#include "vp9_mem_list.h"
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/reset.h>


#define DRV_VERSION "0.01alpha"

#ifndef VP9DEV_MAJOR
#define VP9DEV_MAJOR (160)
#endif
#ifndef VP9DEV_MINOR
#define VP9DEV_MINOR (0)
#endif

#define MACC_VP9_REGS_BASE (0x01C0D000)

/*#define VP9_DEBUG*/
#define google_vp9_printk(level, msg...) printk(level "google_vp9: " msg)

#define GOOGLE_VP9_CLK_HIGH_WATER (700)
#define GOOGLE_VP9_CLK_LOW_WATER (100)

#define PRINTK_IOMMU_ADDR 0

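/*
 * Default char device numbers. VP9DEV_MAJOR/VP9DEV_MINOR above are the
 * compile-time defaults; the module parameters below allow them to be
 * overridden at module load time (exposed read-only in sysfs, mode 0444).
 */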
static int vp9_dev_major = VP9DEV_MAJOR;
static int vp9_dev_minor = VP9DEV_MINOR;
module_param(vp9_dev_major, int, 0444);
module_param(vp9_dev_minor, int, 0444);

struct iomap_para {
	char *regs_macc;
	char *regs_ccmu;
};

struct user_iommu_param {
	int fd;
	unsigned int iommu_addr;
};

struct cedarv_iommu_buffer {
	struct aw_mem_list_head i_list;
	int fd;
	unsigned long iommu_addr;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	int p_id;
};

static DECLARE_WAIT_QUEUE_HEAD(wait_vp9);
struct googlevp9_dev {
	struct cdev cdev;		/* char device struct */
	struct device *dev;		/* ptr to class device struct */
	struct device *platform_dev;	/* ptr to platform device struct */
	struct class *class;		/* class for auto create device node */

	struct semaphore sem;		/* mutual exclusion semaphore */

	wait_queue_head_t wq;		/* wait queue for poll ops */

	struct iomap_para iomap_addrs;	/* io remap addrs */

	struct timer_list vp9_engine_timer;
	struct timer_list vp9_engine_timer_rel;

	u32 irq;			/* cedar video engine irq number */
	u32 de_irq_flag;		/* flag of video decoder engine irq generated */
	u32 de_irq_value;		/* value of video decoder engine irq */
	u32 irq_has_enable;
	u32 ref_count;

	unsigned int *sram_bass_vir;
	unsigned int *clk_bass_vir;

	struct aw_mem_list_head list;	/* buffer list */
	struct mutex lock_mem;
	unsigned char bMemDevAttachFlag;
	struct clk *av1_clk;
	struct clk *bus_av1_clk;
	struct clk *bus_ve_clk;
	struct clk *mbus_av1_clk;
	struct reset_control *reset;
	struct reset_control *reset_ve;
};

struct google_vp9_info {
	unsigned int set_vol_flag;
};

struct googlevp9_dev *google_vp9_devp;

static irqreturn_t GoogleVp9Interrupt(int irq, void *dev)
{
	char *av1_reg_2 = NULL;
	unsigned int status = 0;
	unsigned int interrupt_enable = 0;
	struct iomap_para addrs = google_vp9_devp->iomap_addrs;

	/*1. check and get the interrupt enable bits */
	/*2. check and get the interrupt status bits */
	/*3. clear the interrupt enable bits */
	/*4. set the irq_value and irq_flag */
	/*5. wake up the user mode interrupt_func */
	av1_reg_2 = (addrs.regs_macc + 0x08);
	status = readl((void *)av1_reg_2);
	interrupt_enable = status & 0x0800;

	/* only check status[bit:18,16,13,12,11,8], enable[bit:4] */
	if ((status & 0x7c) && (interrupt_enable)) {
		/* TODO: confirm whether the interrupt enable bits must be cleared here */
		if (status & 0x10) {
			google_vp9_devp->de_irq_value = 1;
			google_vp9_devp->de_irq_flag = 1;
		}
		writel(status & 0xfffff7ff, (void *)av1_reg_2);
		wake_up_interruptible(&wait_vp9);
	}

	return IRQ_HANDLED;
}

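/*
 * clk_status tracks whether the decoder clock tree is currently enabled.
 * It is only read and written while holding google_vp9_spin_lock.
 */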
static int clk_status;

static spinlock_t google_vp9_spin_lock;

int enable_google_vp9_hw_clk(void)
{
	unsigned long flags;
	int res = -EFAULT;

	spin_lock_irqsave(&google_vp9_spin_lock, flags);

	if (clk_status == 1) {
		res = 0;
		goto out;
	}

	reset_control_deassert(google_vp9_devp->reset_ve);
	reset_control_deassert(google_vp9_devp->reset);

	if (clk_prepare_enable(google_vp9_devp->bus_ve_clk)) {
		google_vp9_printk(KERN_ERR, "enable bus ve clk gating failed\n");
		goto out;
	}

	if (clk_prepare_enable(google_vp9_devp->bus_av1_clk)) {
		google_vp9_printk(KERN_ERR, "enable bus av1 clk gating failed\n");
		goto err_bus_ve;
	}

	if (clk_prepare_enable(google_vp9_devp->mbus_av1_clk)) {
		google_vp9_printk(KERN_ERR, "enable mbus av1 clk gating failed\n");
		goto err_bus_av1;
	}

	if (clk_prepare_enable(google_vp9_devp->av1_clk)) {
		google_vp9_printk(KERN_ERR, "enable av1 clk gating failed\n");
		goto err_mbus_av1;
	}

	clk_status = 1;
	res = 0;
	AW_MEM_INIT_LIST_HEAD(&google_vp9_devp->list);
	google_vp9_printk(KERN_DEBUG, "vp9 clk enable!\n");
	goto out;

err_mbus_av1:
	clk_disable_unprepare(google_vp9_devp->mbus_av1_clk);
err_bus_av1:
	clk_disable_unprepare(google_vp9_devp->bus_av1_clk);
err_bus_ve:
	clk_disable_unprepare(google_vp9_devp->bus_ve_clk);
out:
	spin_unlock_irqrestore(&google_vp9_spin_lock, flags);
	return res;
}

int disable_google_vp9_hw_clk(void)
{
	unsigned long flags;
	int res = -EFAULT;
	struct aw_mem_list_head *pos, *q;

	spin_lock_irqsave(&google_vp9_spin_lock, flags);

	if (clk_status == 0) {
		res = 0;
		goto out;
	}
	clk_status = 0;

	clk_disable_unprepare(google_vp9_devp->av1_clk);
	clk_disable_unprepare(google_vp9_devp->mbus_av1_clk);
	clk_disable_unprepare(google_vp9_devp->bus_av1_clk);
	clk_disable_unprepare(google_vp9_devp->bus_ve_clk);
	reset_control_assert(google_vp9_devp->reset);
	reset_control_assert(google_vp9_devp->reset_ve);

	aw_mem_list_for_each_safe(pos, q, &google_vp9_devp->list) {
		struct cedarv_iommu_buffer *tmp;

		tmp = aw_mem_list_entry(pos, struct cedarv_iommu_buffer, i_list);
		aw_mem_list_del(pos);
		kfree(tmp);
	}
	res = 0;
	google_vp9_printk(KERN_DEBUG, "vp9 clk disable!\n");

out:
	spin_unlock_irqrestore(&google_vp9_spin_lock, flags);
	return res;
}

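/*
 * Main user-space entry point. VP9_IOCTL_ENGINE_REQ/REL reference-count the
 * hardware and gate the clocks, VP9_IOCTL_WAIT_INTERRUPT sleeps on wait_vp9
 * until the decode interrupt fires (or the timeout expires), and
 * IOCTL_GET_IOMMU_ADDR/IOCTL_FREE_IOMMU_ADDR import and release dma-buf fds
 * so the decoder can address user buffers through the IOMMU.
 */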
static long compat_googlevp9dev_ioctl(struct file *filp,
				      unsigned int cmd,
				      unsigned long arg)
{
	long ret = 0;
	int ve_timeout = 0;
	unsigned long flags;
	struct google_vp9_info *info;

	info = filp->private_data;
	switch (cmd) {
	case VP9_IOCTL_ENGINE_REQ:
		google_vp9_devp->ref_count++;
		if (google_vp9_devp->ref_count == 1)
			enable_google_vp9_hw_clk();
		break;
	case VP9_IOCTL_ENGINE_REL:
		google_vp9_devp->ref_count--;
		if (google_vp9_devp->ref_count == 0) {
			ret = disable_google_vp9_hw_clk();
			if (ret < 0) {
				google_vp9_printk(KERN_WARNING,
					"IOCTL_ENGINE_REL clk disable error!\n");
				return -EFAULT;
			}
		}
		return ret;
	case VP9_IOCTL_WAIT_INTERRUPT:
		ve_timeout = (int)arg;
		google_vp9_devp->de_irq_value = 0;

		spin_lock_irqsave(&google_vp9_spin_lock, flags);
		if (google_vp9_devp->de_irq_flag)
			google_vp9_devp->de_irq_value = 1;
		spin_unlock_irqrestore(&google_vp9_spin_lock, flags);

		wait_event_interruptible_timeout(wait_vp9,
			google_vp9_devp->de_irq_flag, ve_timeout * HZ);
		google_vp9_devp->de_irq_flag = 0;

		return google_vp9_devp->de_irq_value;

	case VP9_IOCTL_RESET:
		google_vp9_printk(KERN_DEBUG, "vp9 reset!\n");
		reset_control_reset(google_vp9_devp->reset);
		break;

	case VP9_IOCTL_SET_FREQ:
	{
		int arg_rate = (int)arg;

		if (arg_rate >= GOOGLE_VP9_CLK_LOW_WATER &&
		    arg_rate <= GOOGLE_VP9_CLK_HIGH_WATER &&
		    clk_get_rate(google_vp9_devp->av1_clk) / 1000000 != arg_rate) {
			if (clk_set_rate(google_vp9_devp->av1_clk, arg_rate * 1000000)) {
				google_vp9_printk(KERN_WARNING,
					"set pll_vp9_parent clock failed\n");
			}
		}
		ret = clk_get_rate(google_vp9_devp->av1_clk);
		google_vp9_printk(KERN_DEBUG, "real_freq:%ld\n", ret);
		break;
	}

	case VP9_IOCTL_GET_ENV_INFO:
	{
		struct vp9_env_information_compat env_info;

		env_info.phymem_start = 0;
		env_info.phymem_total_size = 0;
		env_info.address_macc = 0;
		if (copy_to_user((void __user *)arg, &env_info,
				 sizeof(struct vp9_env_information_compat)))
			return -EFAULT;
	}
	break;

	case IOCTL_GET_IOMMU_ADDR:
	{
		struct sg_table *sgt;
		struct user_iommu_param sUserIommuParam;
		struct cedarv_iommu_buffer *pVeIommuBuf = NULL;

		google_vp9_devp->bMemDevAttachFlag = 1;

		pVeIommuBuf = kzalloc(sizeof(struct cedarv_iommu_buffer), GFP_KERNEL);
		if (pVeIommuBuf == NULL)
			return -ENOMEM;

		if (copy_from_user(&sUserIommuParam, (void __user *)arg,
				   sizeof(struct user_iommu_param))) {
			google_vp9_printk(KERN_ERR,
				"IOCTL_GET_IOMMU_ADDR copy_from_user error\n");
			kfree(pVeIommuBuf);
			return -EFAULT;
		}

		pVeIommuBuf->fd = sUserIommuParam.fd;

		pVeIommuBuf->dma_buf = dma_buf_get(pVeIommuBuf->fd);
		if (IS_ERR_OR_NULL(pVeIommuBuf->dma_buf)) {
			google_vp9_printk(KERN_ERR,
				"vp9 get dma_buf error\n");
			kfree(pVeIommuBuf);
			return -EFAULT;
		}

		pVeIommuBuf->attachment =
			dma_buf_attach(pVeIommuBuf->dma_buf,
				       google_vp9_devp->platform_dev);
		if (IS_ERR_OR_NULL(pVeIommuBuf->attachment)) {
			google_vp9_printk(KERN_ERR,
				"vp9 get dma_buf_attachment error\n");
			pVeIommuBuf->attachment = NULL;
			goto RELEASE_DMA_BUF;
		}

		sgt = dma_buf_map_attachment(pVeIommuBuf->attachment,
					     DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			google_vp9_printk(KERN_ERR,
				"vp9 dma_buf_map_attachment error\n");
			goto RELEASE_DMA_BUF;
		}
		pVeIommuBuf->sgt = sgt;

		pVeIommuBuf->iommu_addr = sg_dma_address(pVeIommuBuf->sgt->sgl);
		sUserIommuParam.iommu_addr = (unsigned int)(pVeIommuBuf->iommu_addr & 0xffffffff);

		if (copy_to_user((void __user *)arg, &sUserIommuParam,
				 sizeof(struct user_iommu_param))) {
			google_vp9_printk(KERN_ERR,
				"vp9 get iommu copy_to_user error\n");
			goto RELEASE_DMA_BUF;
		}

		pVeIommuBuf->p_id = current->tgid;
#if PRINTK_IOMMU_ADDR
		google_vp9_printk(KERN_DEBUG,
			"fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p, nents:%d, p_id:%d\n",
			pVeIommuBuf->fd,
			pVeIommuBuf->iommu_addr,
			pVeIommuBuf->dma_buf,
			pVeIommuBuf->attachment,
			pVeIommuBuf->sgt,
			pVeIommuBuf->sgt->nents,
			pVeIommuBuf->p_id);
#endif

		mutex_lock(&google_vp9_devp->lock_mem);
		aw_mem_list_add_tail(&pVeIommuBuf->i_list, &google_vp9_devp->list);
		mutex_unlock(&google_vp9_devp->lock_mem);
		break;

RELEASE_DMA_BUF:
		/* undo whatever was set up before the failure */
		if (pVeIommuBuf->sgt)
			dma_buf_unmap_attachment(pVeIommuBuf->attachment,
						 pVeIommuBuf->sgt,
						 DMA_BIDIRECTIONAL);
		if (pVeIommuBuf->attachment)
			dma_buf_detach(pVeIommuBuf->dma_buf,
				       pVeIommuBuf->attachment);
		dma_buf_put(pVeIommuBuf->dma_buf);
		kfree(pVeIommuBuf);
		return -EFAULT;
	}
	case IOCTL_FREE_IOMMU_ADDR:
	{
		struct user_iommu_param sUserIommuParam;
		struct cedarv_iommu_buffer *pVeIommuBuf;

		if (copy_from_user(&sUserIommuParam, (void __user *)arg,
				   sizeof(struct user_iommu_param))) {
			google_vp9_printk(KERN_ERR,
				"IOCTL_FREE_IOMMU_ADDR copy_from_user error\n");
			return -EFAULT;
		}
		aw_mem_list_for_each_entry(pVeIommuBuf, &google_vp9_devp->list, i_list) {
			if (pVeIommuBuf->fd == sUserIommuParam.fd &&
			    pVeIommuBuf->p_id == current->tgid) {
#if PRINTK_IOMMU_ADDR
				google_vp9_printk(KERN_DEBUG,
					"free: fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p, nents:%d, p_id:%d\n",
					pVeIommuBuf->fd,
					pVeIommuBuf->iommu_addr,
					pVeIommuBuf->dma_buf,
					pVeIommuBuf->attachment,
					pVeIommuBuf->sgt,
					pVeIommuBuf->sgt->nents,
					pVeIommuBuf->p_id);
#endif

				if (pVeIommuBuf->dma_buf) {
					if (pVeIommuBuf->attachment) {
						if (pVeIommuBuf->sgt)
							dma_buf_unmap_attachment(pVeIommuBuf->attachment,
										 pVeIommuBuf->sgt,
										 DMA_BIDIRECTIONAL);
						dma_buf_detach(pVeIommuBuf->dma_buf, pVeIommuBuf->attachment);
					}

					dma_buf_put(pVeIommuBuf->dma_buf);
				}

				mutex_lock(&google_vp9_devp->lock_mem);
				aw_mem_list_del(&pVeIommuBuf->i_list);
				kfree(pVeIommuBuf);
				mutex_unlock(&google_vp9_devp->lock_mem);
				break;
			}
		}
		break;
	}
	default:
		return -ENOTTY;
	}
	return ret;
}

static int googlevp9dev_open(struct inode *inode, struct file *filp)
{
	struct google_vp9_info *info;

	info = kmalloc(sizeof(struct google_vp9_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->set_vol_flag = 0;

	filp->private_data = info;
	if (down_interruptible(&google_vp9_devp->sem)) {
		filp->private_data = NULL;
		kfree(info);
		return -ERESTARTSYS;
	}

	if (google_vp9_devp->ref_count == 0)
		google_vp9_devp->de_irq_flag = 0;

	up(&google_vp9_devp->sem);
	nonseekable_open(inode, filp);
	return 0;
}

static int googlevp9dev_release(struct inode *inode, struct file *filp)
{
	struct google_vp9_info *info;
	struct aw_mem_list_head *pos, *q;
	struct cedarv_iommu_buffer *pVeIommuBuf;

	info = filp->private_data;
	if (google_vp9_devp->bMemDevAttachFlag) {
		aw_mem_list_for_each_safe(pos, q, &google_vp9_devp->list) {
			pVeIommuBuf = aw_mem_list_entry(pos, struct cedarv_iommu_buffer, i_list);
			if (pVeIommuBuf->p_id == current->tgid) {
#if PRINTK_IOMMU_ADDR
				google_vp9_printk(KERN_DEBUG,
					"free: fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p, nents:%d, p_id:%d\n",
					pVeIommuBuf->fd,
					pVeIommuBuf->iommu_addr,
					pVeIommuBuf->dma_buf,
					pVeIommuBuf->attachment,
					pVeIommuBuf->sgt,
					pVeIommuBuf->sgt->nents,
					pVeIommuBuf->p_id);
#endif
				if (pVeIommuBuf->dma_buf) {
					if (pVeIommuBuf->attachment) {
						if (pVeIommuBuf->sgt)
							dma_buf_unmap_attachment(pVeIommuBuf->attachment,
										 pVeIommuBuf->sgt,
										 DMA_BIDIRECTIONAL);
						dma_buf_detach(pVeIommuBuf->dma_buf, pVeIommuBuf->attachment);
					}
					dma_buf_put(pVeIommuBuf->dma_buf);
				}
				mutex_lock(&google_vp9_devp->lock_mem);
				aw_mem_list_del(&pVeIommuBuf->i_list);
				kfree(pVeIommuBuf);
				mutex_unlock(&google_vp9_devp->lock_mem);
			}
		}
	}

	if (down_interruptible(&google_vp9_devp->sem))
		return -ERESTARTSYS;

	/* release other resources here */
	if (google_vp9_devp->ref_count == 0)
		google_vp9_devp->de_irq_flag = 1;

	up(&google_vp9_devp->sem);

	kfree(info);
	return 0;
}

static void googlevp9dev_vma_open(struct vm_area_struct *vma)
{
}

static void googlevp9dev_vma_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct cedardev_remap_vm_ops = {
	.open = googlevp9dev_vma_open,
	.close = googlevp9dev_vma_close,
};

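/*
 * googlevp9dev_mmap() lets user space map the decoder register window at
 * MACC_VP9_REGS_BASE directly; the region is marked VM_IO and mapped
 * uncached so register accesses are not served from the CPU cache.
 */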
static int googlevp9dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long temp_pfn;

	if (vma->vm_end - vma->vm_start == 0) {
		google_vp9_printk(KERN_WARNING,
			"vma->vm_end is equal to vma->vm_start: %lx\n",
			vma->vm_start);
		return 0;
	}
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
		google_vp9_printk(KERN_WARNING,
			"vma->vm_pgoff %lx is larger than the largest page number\n",
			vma->vm_pgoff);
		return -EINVAL;
	}

	temp_pfn = MACC_VP9_REGS_BASE >> 12;

	/* Set reserved and I/O flag for the area. */
	vma->vm_flags |= /*VM_RESERVED | */VM_IO;
	/* Select uncached access. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, temp_pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_ops = &cedardev_remap_vm_ops;
	googlevp9dev_vma_open(vma);

	return 0;
}


static int snd_sw_google_vp9_suspend(struct platform_device *pdev,
				     pm_message_t state)
{
	int ret = 0;

	google_vp9_printk(KERN_WARNING, "[google_vp9] standby suspend\n");
	ret = disable_google_vp9_hw_clk();

	if (ret < 0) {
		google_vp9_printk(KERN_WARNING,
			"google_vp9 clk disable failed!\n");
		return -EFAULT;
	}

	return 0;
}

static int snd_sw_google_vp9_resume(struct platform_device *pdev)
{
	int ret = 0;

	google_vp9_printk(KERN_WARNING, "[google_vp9] standby resume\n");

	if (google_vp9_devp->ref_count == 0)
		return 0;

	ret = enable_google_vp9_hw_clk();
	if (ret < 0) {
		google_vp9_printk(KERN_WARNING,
			"google_vp9 clk enable failed!\n");
		return -EFAULT;
	}
	return 0;
}


static const struct file_operations googlevp9dev_fops = {
	.owner = THIS_MODULE,
	.mmap = googlevp9dev_mmap,
	.open = googlevp9dev_open,
	.release = googlevp9dev_release,
	.llseek = no_llseek,
	.unlocked_ioctl = compat_googlevp9dev_ioctl,
	.compat_ioctl = compat_googlevp9dev_ioctl,
};

static int create_char_device(void)
{
	dev_t dev = 0;
	int ret = 0;
	int devno;

	/* 1. register or alloc the device number. */
	if (vp9_dev_major) {
		dev = MKDEV(vp9_dev_major, vp9_dev_minor);
		ret = register_chrdev_region(dev, 1, "googlevp9_dev");
	} else {
		ret = alloc_chrdev_region(&dev, vp9_dev_minor, 1, "googlevp9_dev");
		vp9_dev_major = MAJOR(dev);
		vp9_dev_minor = MINOR(dev);
	}

	if (ret < 0) {
		google_vp9_printk(KERN_WARNING, "cedar_dev: can't get major %d\n", vp9_dev_major);
		return ret;
	}

	/* 2. create char device */
	devno = MKDEV(vp9_dev_major, vp9_dev_minor);
	cdev_init(&google_vp9_devp->cdev, &googlevp9dev_fops);
	google_vp9_devp->cdev.owner = THIS_MODULE;
	ret = cdev_add(&google_vp9_devp->cdev, devno, 1);
	if (ret) {
		google_vp9_printk(KERN_WARNING, "Err:%d add cedar-dev fail\n", ret);
		goto region_del;
	}

	/* 3. create class and device for automatic device node creation */
	google_vp9_devp->class = class_create(THIS_MODULE, "cedar_av1");
	if (IS_ERR_OR_NULL(google_vp9_devp->class)) {
		ret = -EINVAL;
		goto dev_del;
	}
	google_vp9_devp->dev = device_create(google_vp9_devp->class, NULL, devno, NULL, "cedar_av1");
	if (IS_ERR_OR_NULL(google_vp9_devp->dev)) {
		ret = -EINVAL;
		goto class_del;
	}
	return ret;

class_del:
	class_destroy(google_vp9_devp->class);
dev_del:
	cdev_del(&google_vp9_devp->cdev);
region_del:
	unregister_chrdev_region(dev, 1);

	return ret;
}

#if 0
static int set_ccmu_by_shelf(char *ccmu_base)
{
	unsigned int v;
	/*
	reg = readl(ccmu_base + 0x58);
	reg &= 0x7ffa0000;

	fq = fq/6 - 1;

	reg = reg & (~(1 << 29));
	writel(reg, ccmu_base + 0x58);

	reg = readl(ccmu_base + 0x58);
	reg = reg | (fq<<8) | (1<<1) | (1<<0);
	writel(reg, ccmu_base + 0x58);

	reg = readl(ccmu_base + 0x58);
	reg = reg | (1<<31);
	writel(reg, ccmu_base + 0x58);

	reg = readl(ccmu_base + 0x58);
	reg = reg | (1<<29);
	writel(reg, ccmu_base + 0x58);

	count = 0;
	do {
		mdelay(5);
		reg = readl(ccmu_base + 0x58);
		reg = reg & (1<<28);
		count++;
	} while (reg == 0 && count < 10);

	if (count >= 10 && reg == 0) {
		google_vp9_printk(KERN_WARNING, "Err: clock is unlock\n");
		return -1;
	}
	*/
	v = readl(ccmu_base + 0x690);
	v |= (1U << 31);
	writel(v, ccmu_base + 0x690);

	v = readl(ccmu_base + 0x69c);
	v |= (1 << 0);
	writel(v, ccmu_base + 0x69c);

	v = readl(ccmu_base + 0x804);
	v |= (1U << 1);
	writel(v, ccmu_base + 0x804);

	v = readl(ccmu_base + 0x69c);
	v &= ~(1 << 16);
	writel(v, ccmu_base + 0x69c);

	v = readl(ccmu_base + 0x69c);
	v |= (1 << 16);
	writel(v, ccmu_base + 0x69c);
	return 0;
}
#endif

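/*
 * Probe-time initialisation: allocate the driver state, create the char
 * device node, parse the interrupt and register regions from the device
 * tree, install the interrupt handler and look up the clocks and reset
 * controls used by enable/disable_google_vp9_hw_clk().
 */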
static int googleVp9dev_init(struct platform_device *pdev)
{
	int ret = 0;
	struct device_node *node;

	google_vp9_printk(KERN_WARNING, "[google av1]: install start!!!\n");

	google_vp9_devp = kzalloc(sizeof(struct googlevp9_dev), GFP_KERNEL);
	if (google_vp9_devp == NULL) {
		google_vp9_printk(KERN_WARNING,
			"alloc mem for google vp9 device err\n");
		return -ENOMEM;
	}

	node = pdev->dev.of_node;
	google_vp9_devp->platform_dev = &pdev->dev;

	/* register or alloc the device number. */
	if (create_char_device() != 0) {
		ret = -EINVAL;
		goto free_devp;
	}

	google_vp9_devp->irq = irq_of_parse_and_map(node, 0);
	google_vp9_printk(KERN_INFO, "google vp9: the irq is %d\n",
		google_vp9_devp->irq);
	if (google_vp9_devp->irq <= 0) {
		google_vp9_printk(KERN_WARNING, "Can't parse IRQ\n");
		ret = -EINVAL;
		goto err_chrdev;
	}

	spin_lock_init(&google_vp9_spin_lock);

	sema_init(&google_vp9_devp->sem, 1);
	init_waitqueue_head(&google_vp9_devp->wq);

	mutex_init(&google_vp9_devp->lock_mem);

	ret = request_irq(google_vp9_devp->irq,
			  GoogleVp9Interrupt, 0, "googlevp9_dev", NULL);
	if (ret < 0) {
		google_vp9_printk(KERN_WARNING, "request irq err\n");
		ret = -EINVAL;
		goto err_chrdev;
	}

	/* map for macc io space */
	google_vp9_devp->iomap_addrs.regs_macc = of_iomap(node, 0);
	if (!google_vp9_devp->iomap_addrs.regs_macc) {
		google_vp9_printk(KERN_WARNING, "vp9 Can't map registers\n");
		ret = -EINVAL;
		goto err_free_irq;
	}

	/* map for ccmu io space */
	google_vp9_devp->iomap_addrs.regs_ccmu = of_iomap(node, 1);
	if (!google_vp9_devp->iomap_addrs.regs_ccmu) {
		google_vp9_printk(KERN_WARNING, "vp9 Can't map ccmu registers\n");
		ret = -EINVAL;
		goto err_iounmap_macc;
	}

	/*
	if (set_ccmu_by_shelf(google_vp9_devp->iomap_addrs.regs_ccmu) == 0) {
		google_vp9_printk(KERN_WARNING, "now read reg0:%x\n",
			readl(google_vp9_devp->iomap_addrs.regs_macc));
		writel(0x2342, google_vp9_devp->iomap_addrs.regs_macc + 0x130);
		google_vp9_printk(KERN_WARNING, "ji***read24:%x\n",
			readl(google_vp9_devp->iomap_addrs.regs_macc + 0x130));
	} else {
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}
	*/

	/* get clocks and resets */
	google_vp9_devp->av1_clk = devm_clk_get(google_vp9_devp->platform_dev, "av1");
	if (IS_ERR(google_vp9_devp->av1_clk)) {
		google_vp9_printk(KERN_WARNING, "try to get av1 clk fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}

	google_vp9_devp->bus_av1_clk = devm_clk_get(google_vp9_devp->platform_dev, "bus_av1");
	if (IS_ERR(google_vp9_devp->bus_av1_clk)) {
		google_vp9_printk(KERN_WARNING, "try to get bus av1 clk fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}

	google_vp9_devp->bus_ve_clk = devm_clk_get(google_vp9_devp->platform_dev, "bus_ve");
	if (IS_ERR(google_vp9_devp->bus_ve_clk)) {
		google_vp9_printk(KERN_WARNING, "try to get bus ve clk fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}

	google_vp9_devp->mbus_av1_clk = devm_clk_get(google_vp9_devp->platform_dev, "mbus_av1");
	if (IS_ERR(google_vp9_devp->mbus_av1_clk)) {
		google_vp9_printk(KERN_WARNING, "try to get mbus av1 clk fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}

	google_vp9_devp->reset = devm_reset_control_get(google_vp9_devp->platform_dev, "reset_av1");
	if (IS_ERR(google_vp9_devp->reset)) {
		google_vp9_printk(KERN_WARNING, "get av1 reset fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}
	google_vp9_devp->reset_ve = devm_reset_control_get_shared(google_vp9_devp->platform_dev, "reset_ve");
	if (IS_ERR(google_vp9_devp->reset_ve)) {
		google_vp9_printk(KERN_WARNING, "get ve reset fail\n");
		ret = -EINVAL;
		goto err_iounmap_ccmu;
	}

	google_vp9_printk(KERN_WARNING, "[google av1]: install end!!!\n");
	return 0;

err_iounmap_ccmu:
	iounmap(google_vp9_devp->iomap_addrs.regs_ccmu);
err_iounmap_macc:
	iounmap(google_vp9_devp->iomap_addrs.regs_macc);
err_free_irq:
	free_irq(google_vp9_devp->irq, NULL);
err_chrdev:
	device_destroy(google_vp9_devp->class, MKDEV(vp9_dev_major, vp9_dev_minor));
	class_destroy(google_vp9_devp->class);
	cdev_del(&google_vp9_devp->cdev);
	unregister_chrdev_region(MKDEV(vp9_dev_major, vp9_dev_minor), 1);
free_devp:
	kfree(google_vp9_devp);
	return ret;
}



static void googleVp9dev_exit(void)
{
	dev_t dev;

	dev = MKDEV(vp9_dev_major, vp9_dev_minor);

	free_irq(google_vp9_devp->irq, NULL);
	iounmap(google_vp9_devp->iomap_addrs.regs_macc);
	iounmap(google_vp9_devp->iomap_addrs.regs_ccmu);

	/* Destroy char device */
	cdev_del(&google_vp9_devp->cdev);
	device_destroy(google_vp9_devp->class, dev);
	class_destroy(google_vp9_devp->class);
	unregister_chrdev_region(dev, 1);
	kfree(google_vp9_devp);
}

static int sunxi_google_vp9_remove(struct platform_device *pdev)
{
	googleVp9dev_exit();
	return 0;
}

static int sunxi_google_vp9_probe(struct platform_device *pdev)
{
	return googleVp9dev_init(pdev);
}

static const struct of_device_id sunxi_google_vp9_match[] = {
	{ .compatible = "allwinner,sunxi-cedar-av1", },
	{}
};
MODULE_DEVICE_TABLE(of, sunxi_google_vp9_match);

static struct platform_driver sunxi_google_vp9_driver = {
	.probe = sunxi_google_vp9_probe,
	.remove = sunxi_google_vp9_remove,
	.suspend = snd_sw_google_vp9_suspend,
	.resume = snd_sw_google_vp9_resume,
	.driver = {
		.name = "sunxi-av1",
		.owner = THIS_MODULE,
		.of_match_table = sunxi_google_vp9_match,
	},
};

static int __init sunxi_google_vp9_init(void)
{
	google_vp9_printk(KERN_WARNING, "sunxi google vp9 version 1.0\n");
	return platform_driver_register(&sunxi_google_vp9_driver);
}

static void __exit sunxi_google_vp9_exit(void)
{
	platform_driver_unregister(&sunxi_google_vp9_driver);
}

module_init(sunxi_google_vp9_init);
module_exit(sunxi_google_vp9_exit);

MODULE_AUTHOR("jilinglin");
MODULE_DESCRIPTION("User mode GOOGLE VP9/AV1 device interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:cedarx-sunxi");