1 /*
2  * vpu.c
3  *
4  * linux device driver for VPU.
5  *
6  * Copyright (C) 2006 - 2013  CHIPS&MEDIA INC.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  */
19 
20 
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/wait.h>
28 #include <linux/list.h>
29 #include <linux/clk.h>
30 #include <linux/delay.h>
31 #include <linux/uaccess.h>
32 #include <linux/cdev.h>
33 #include <linux/slab.h>
34 #include <linux/sched.h>
35 #include <linux/platform_device.h>
36 #include <linux/of.h>
37 #include <linux/of_fdt.h>
38 #include <linux/reset.h>
39 #include <linux/dma-buf.h>
40 #include <linux/compat.h>
41 #include <linux/of_reserved_mem.h>
42 #include <linux/of_address.h>
43 #include <linux/amlogic/media/codec_mm/codec_mm.h>
44 
45 #include <linux/amlogic/media/utils/vdec_reg.h>
46 #include "../../../common/media_clock/switch/amports_gate.h"
47 
48 #include "vpu.h"
49 #include "vmm.h"
50 
51 /* definitions to be changed per customer configuration */
52 /* define this for a frame-by-frame clock gating scheme */
53 /* #define VPU_SUPPORT_CLOCK_CONTROL */
54 
55 #define VPU_PLATFORM_DEVICE_NAME "HevcEnc"
56 #define VPU_DEV_NAME "HevcEnc"
57 #define VPU_CLASS_NAME "HevcEnc"
58 
59 #ifndef VM_RESERVED	/* for kernel 3.7.0 and later, where VM_RESERVED was removed */
60 #define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
61 #endif
62 
63 #define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (64 * SZ_1M)
64 
65 #define LOG_ALL 0
66 #define LOG_INFO 1
67 #define LOG_DEBUG 2
68 #define LOG_ERROR 3
69 
70 #define enc_pr(level, x...) \
71 	do { \
72 		if (level >= print_level) \
73 			printk(x); \
74 	} while (0)
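/*
 * Illustrative note (not in the original source): a message prints only
 * when its level is >= print_level, so with the default print_level of
 * LOG_DEBUG (2) a call such as enc_pr(LOG_ERROR, "init failed\n") is
 * logged, while enc_pr(LOG_INFO, ...) is suppressed.
 */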
75 
76 static s32 print_level = LOG_DEBUG;
77 static s32 clock_level = 4;
78 
79 static struct video_mm_t s_vmem;
80 static struct vpudrv_buffer_t s_video_memory = {0};
81 static bool use_reserve;
82 static ulong cma_pool_size;
83 
84 /* end customer definition */
85 static struct vpudrv_buffer_t s_instance_pool = {0};
86 static struct vpudrv_buffer_t s_common_memory = {0};
87 static struct vpu_drv_context_t s_vpu_drv_context;
88 static s32 s_vpu_major;
89 static struct device *hevcenc_dev;
90 
91 static s32 s_vpu_open_ref_count;
92 static s32 s_vpu_irq;
93 static bool s_vpu_irq_requested;
94 
95 static struct vpudrv_buffer_t s_vpu_register = {0};
96 
97 static s32 s_interrupt_flag;
98 static wait_queue_head_t s_interrupt_wait_q;
99 
100 static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
101 static DEFINE_SEMAPHORE(s_vpu_sem);
102 static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
103 static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
104 static struct tasklet_struct hevc_tasklet;
105 static struct platform_device *hevc_pdev;
106 
107 static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];
108 
109 static struct vpu_dma_cfg dma_cfg[3];
110 
111 static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t);
112 static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg);
113 
114 static void dma_flush(u32 buf_start, u32 buf_size)
115 {
116 	if (hevc_pdev)
117 		dma_sync_single_for_device(
118 			&hevc_pdev->dev, buf_start,
119 			buf_size, DMA_TO_DEVICE);
120 }
121 
122 static void cache_flush(u32 buf_start, u32 buf_size)
123 {
124 	if (hevc_pdev)
125 		dma_sync_single_for_cpu(
126 			&hevc_pdev->dev, buf_start,
127 			buf_size, DMA_FROM_DEVICE);
128 }
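/*
 * Note on the two helpers above: dma_flush() publishes CPU-written bytes
 * to the device (DMA_TO_DEVICE) before the encoder reads a buffer, while
 * cache_flush() drops stale CPU cache lines (DMA_FROM_DEVICE) before the
 * CPU reads encoder output. Both take the bus address programmed into
 * the hardware, not a kernel virtual address.
 */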
129 
130 s32 vpu_hw_reset(void)
131 {
132 	enc_pr(LOG_DEBUG, "request vpu reset from application.\n");
133 	return 0;
134 }
135 
136 s32 vpu_clk_config(u32 enable)
137 {
138 	if (enable) {
139 		if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
140 			HevcEnc_MoreClock_enable();
141 		HevcEnc_clock_enable(clock_level);
142 	} else {
143 		HevcEnc_clock_disable();
144 		if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
145 			HevcEnc_MoreClock_disable();
146 	}
147 	return 0;
148 }
149 
150 static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb)
151 {
152 	if (!vb)
153 		return -1;
154 
155 	vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0);
156 	if ((ulong)vb->phys_addr == (ulong)-1) {
157 		enc_pr(LOG_ERROR,
158 			"Physical memory allocation error size=%d\n", vb->size);
159 		return -1;
160 	}
161 
162 	enc_pr(LOG_INFO, "vpu_alloc_dma_buffer: vb->phys_addr 0x%lx\n", vb->phys_addr);
163 	return 0;
164 }
165 
166 static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb)
167 {
168 	if (!vb)
169 		return;
170 	enc_pr(LOG_INFO, "vpu_free_dma_buffer 0x%lx\n", vb->phys_addr);
171 
172 	if (vb->phys_addr)
173 		vmem_free(&s_vmem, vb->phys_addr, 0);
174 }
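/*
 * The two helpers above only carve chunks out of the single s_vmem pool
 * (seeded from reserved memory or CMA); they never touch the page
 * allocator. A minimal usage sketch, with a hypothetical 1 MB buffer:
 *
 *	struct vpudrv_buffer_t vb = { .size = SZ_1M };
 *	if (vpu_alloc_dma_buffer(&vb) == 0)
 *		vpu_free_dma_buffer(&vb);
 */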
175 
176 static s32 vpu_free_instances(struct file *filp)
177 {
178 	struct vpudrv_instanace_list_t *vil, *n;
179 	struct vpudrv_instance_pool_t *vip;
180 	void *vip_base;
181 
182 	enc_pr(LOG_DEBUG, "vpu_free_instances\n");
183 
184 	list_for_each_entry_safe(vil, n, &s_inst_list_head, list) {
185 		if (vil->filp == filp) {
186 			vip_base = (void *)s_instance_pool.base;
187 			enc_pr(LOG_INFO,
188 				"free_instances instIdx=%d, coreIdx=%d, vip_base=%p\n",
189 				(s32)vil->inst_idx,
190 				(s32)vil->core_idx,
191 				vip_base);
192 			vip = (struct vpudrv_instance_pool_t *)vip_base;
193 			if (vip) {
194 				/* only the first 4 bytes (inUse of
195 				 *	CodecInst in vpuapi) matter when
196 				 *	freeing the corresponding instance.
197 				 */
198 				memset(&vip->codecInstPool[vil->inst_idx],
199 					0x00, 4);
200 			}
201 			s_vpu_open_ref_count--;
202 			list_del(&vil->list);
203 			kfree(vil);
204 		}
205 	}
206 	return 1;
207 }
208 
209 static s32 vpu_free_buffers(struct file *filp)
210 {
211 	struct vpudrv_buffer_pool_t *pool, *n;
212 	struct vpudrv_buffer_t vb;
213 
214 	enc_pr(LOG_DEBUG, "vpu_free_buffers\n");
215 
216 	list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
217 		if (pool->filp == filp) {
218 			vb = pool->vb;
219 			if (vb.phys_addr) {
220 				vpu_free_dma_buffer(&vb);
221 				list_del(&pool->list);
222 				kfree(pool);
223 			}
224 		}
225 	}
226 	return 0;
227 }
228 
229 static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff)
230 {
231 	struct vpudrv_buffer_pool_t *pool, *n;
232 	struct vpudrv_buffer_t vb;
233 	bool find = false;
234 	u32 cached = 0;
235 
236 	enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n");
237 	spin_lock(&s_vpu_lock);
238 	list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
239 		if (pool->filp == filp) {
240 			vb = pool->vb;
241 			if (((vb.phys_addr  >> PAGE_SHIFT) == vm_pgoff)
242 				&& find == false){
243 				cached = vb.cached;
244 				find = true;
245 			}
246 		}
247 	}
248 	spin_unlock(&s_vpu_lock);
249 	enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached);
250 	return cached;
251 }
252 
253 static void hevcenc_isr_tasklet(ulong data)
254 {
255 	struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data;
256 
257 	enc_pr(LOG_INFO, "hevcenc_isr_tasklet interrupt:0x%08lx\n",
258 		dev->interrupt_reason);
259 	if (dev->interrupt_reason) {
260 		/* notify the interrupt to user space */
261 		if (dev->async_queue) {
262 			enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__);
263 			kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
264 		}
265 		s_interrupt_flag = 1;
266 		wake_up_interruptible(&s_interrupt_wait_q);
267 	}
268 	enc_pr(LOG_ALL, "[-]%s\n", __func__);
269 }
270 
271 static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id)
272 {
273 	struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id;
274 	/* this can be removed;
275 	 *	it also works in VPU_WaitInterrupt of the API
276 	 */
277 	u32 core;
278 	ulong interrupt_reason = 0;
279 
280 	enc_pr(LOG_ALL, "[+]%s\n", __func__);
281 
282 	for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
283 		if (s_bit_firmware_info[core].size == 0) {
284 			/* it means we did not get information
285 			 *	about the current core from the API
286 			 *	layer; no core is activated.
287 			 */
288 			enc_pr(LOG_ERROR,
289 				"s_bit_firmware_info[core].size is zero\n");
290 			continue;
291 		}
292 		if (ReadVpuRegister(W4_VPU_VPU_INT_STS)) {
293 			interrupt_reason = ReadVpuRegister(W4_VPU_INT_REASON);
294 			WriteVpuRegister(W4_VPU_INT_REASON_CLEAR,
295 				interrupt_reason);
296 			WriteVpuRegister(W4_VPU_VINT_CLEAR, 0x1);
297 			dev->interrupt_reason |= interrupt_reason;
298 		}
299 		enc_pr(LOG_INFO,
300 			"intr_reason: 0x%08lx\n", dev->interrupt_reason);
301 	}
302 	if (dev->interrupt_reason)
303 		tasklet_schedule(&hevc_tasklet);
304 	enc_pr(LOG_ALL, "[-]%s\n", __func__);
305 	return IRQ_HANDLED;
306 }
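/*
 * Interrupt flow: the hard IRQ handler above latches and clears the W4
 * interrupt reason while it has the device registers, then defers the
 * rest to hevcenc_isr_tasklet(), which wakes VDI_IOCTL_WAIT_INTERRUPT
 * sleepers and sends SIGIO to any fasync listener.
 */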
307 
308 static s32 vpu_open(struct inode *inode, struct file *filp)
309 {
310 	bool alloc_buffer = false;
311 	s32 r = 0;
312 
313 	enc_pr(LOG_DEBUG, "[+] %s\n", __func__);
314 	spin_lock(&s_vpu_lock);
315 	s_vpu_drv_context.open_count++;
316 	if (s_vpu_drv_context.open_count == 1) {
317 		alloc_buffer = true;
318 	} else {
319 		r = -EBUSY;
320 		/* open_count is dropped exactly once on the Err path below */
321 		spin_unlock(&s_vpu_lock);
322 		goto Err;
323 	}
324 	filp->private_data = (void *)(&s_vpu_drv_context);
325 	spin_unlock(&s_vpu_lock);
326 	if (alloc_buffer && !use_reserve) {
327 #ifdef CONFIG_CMA
328 		s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE;
329 		s_video_memory.phys_addr =
330 			(ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME,
331 			VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE >> PAGE_SHIFT, 0, 0);
332 		if (s_video_memory.phys_addr) {
333 			enc_pr(LOG_DEBUG,
334 				"allocating phys 0x%lx, virt addr 0x%lx, size %dk\n",
335 				s_video_memory.phys_addr,
336 				s_video_memory.base,
337 				s_video_memory.size >> 10);
338 			if (vmem_init(&s_vmem,
339 				s_video_memory.phys_addr,
340 				s_video_memory.size) < 0) {
341 				enc_pr(LOG_ERROR, "fail to init vmem system\n");
342 				r = -ENOMEM;
343 				codec_mm_free_for_dma(
344 					VPU_DEV_NAME,
345 					(u32)s_video_memory.phys_addr);
346 				vmem_exit(&s_vmem);
347 				memset(&s_video_memory, 0,
348 					sizeof(struct vpudrv_buffer_t));
349 				memset(&s_vmem, 0,
350 					sizeof(struct video_mm_t));
351 			}
352 		} else {
353 			enc_pr(LOG_ERROR,
354 				"CMA failed to allocate dma buffer for %s, phys: 0x%lx\n",
355 				VPU_DEV_NAME, s_video_memory.phys_addr);
356 			if (s_video_memory.phys_addr)
357 				codec_mm_free_for_dma(
358 					VPU_DEV_NAME,
359 					(u32)s_video_memory.phys_addr);
360 			s_video_memory.phys_addr = 0;
361 			r = -ENOMEM;
362 		}
363 #else
364 		enc_pr(LOG_ERROR,
365 			"No CMA and reserved memory for HevcEnc!!!\n");
366 		r = -ENOMEM;
367 #endif
368 	} else if (!s_video_memory.phys_addr) {
369 		enc_pr(LOG_ERROR,
370 			"HevcEnc memory is not malloced!!!\n");
371 		r = -ENOMEM;
372 	}
373 	if (alloc_buffer) {
374 		ulong flags;
375 		u32 data32;
376 
377 		if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) {
378 			s32 err;
379 
380 			err = request_irq(s_vpu_irq, vpu_irq_handler, 0,
381 				"HevcEnc-irq", (void *)(&s_vpu_drv_context));
382 			if (err) {
383 				enc_pr(LOG_ERROR,
384 					"fail to register interrupt handler\n");
385 				s_vpu_drv_context.open_count--;
386 				return -EFAULT;
387 			}
388 			s_vpu_irq_requested = true;
389 		}
390 		amports_switch_gate("vdec", 1);
391 		spin_lock_irqsave(&s_vpu_lock, flags);
392 		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
393 			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
394 			(get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
395 			? ~0x8 : ~(0x3<<24)));
396 		udelay(10);
397 
398 		if (get_cpu_type() <= MESON_CPU_MAJOR_ID_TXLX) {
399 			data32 = 0x700;
400 			data32 |= READ_VREG(DOS_SW_RESET4);
401 			WRITE_VREG(DOS_SW_RESET4, data32);
402 			data32 &= ~0x700;
403 			WRITE_VREG(DOS_SW_RESET4, data32);
404 		} else {
405 			data32 = 0xf00;
406 			data32 |= READ_VREG(DOS_SW_RESET4);
407 			WRITE_VREG(DOS_SW_RESET4, data32);
408 			data32 &= ~0xf00;
409 			WRITE_VREG(DOS_SW_RESET4, data32);
410 		}
411 
412 		WRITE_MPEG_REG(RESET0_REGISTER, data32 & ~(1<<21));
413 		WRITE_MPEG_REG(RESET0_REGISTER, data32 | (1<<21));
414 		READ_MPEG_REG(RESET0_REGISTER);
415 		READ_MPEG_REG(RESET0_REGISTER);
416 		READ_MPEG_REG(RESET0_REGISTER);
417 		READ_MPEG_REG(RESET0_REGISTER);
418 #ifndef VPU_SUPPORT_CLOCK_CONTROL
419 		vpu_clk_config(1);
420 #endif
421 		/* Enable wave420l_vpu_idle_rise_irq,
422 		 *	Disable wave420l_vpu_idle_fall_irq
423 		 */
424 		WRITE_VREG(DOS_WAVE420L_CNTL_STAT, 0x1);
425 		WRITE_VREG(DOS_MEM_PD_WAVE420L, 0x0);
426 
427 		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
428 			READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
429 			(get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
430 			? ~0x8 : ~(0x3<<12)));
431 		udelay(10);
432 
433 		spin_unlock_irqrestore(&s_vpu_lock, flags);
434 	}
435 	memset(dma_cfg, 0, sizeof(dma_cfg));
436 	dma_cfg[0].fd = -1;
437 	dma_cfg[1].fd = -1;
438 	dma_cfg[2].fd = -1;
439 Err:
440 	if (r != 0)
441 		s_vpu_drv_context.open_count--;
442 	enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r);
443 	return r;
444 }
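/*
 * Power-up order used on first open, as coded above: clear the AO sleep
 * bits, pulse DOS_SW_RESET4 and RESET0, ungate the clocks, power up the
 * WAVE420L memories, then clear the isolation bits. vpu_release()
 * reverses this sequence when the last user closes the device.
 */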
445 
446 static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg)
447 {
448 	s32 ret = 0;
449 	struct vpu_drv_context_t *dev =
450 		(struct vpu_drv_context_t *)filp->private_data;
451 
452 	switch (cmd) {
453 	case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
454 		{
455 			struct vpudrv_buffer_pool_t *vbp;
456 
457 			enc_pr(LOG_ALL,
458 				"[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
459 			ret = down_interruptible(&s_vpu_sem);
460 			if (ret == 0) {
461 				vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
462 				if (!vbp) {
463 					up(&s_vpu_sem);
464 					return -ENOMEM;
465 				}
466 
467 				ret = copy_from_user(&(vbp->vb),
468 					(struct vpudrv_buffer_t *)arg,
469 					sizeof(struct vpudrv_buffer_t));
470 				if (ret) {
471 					kfree(vbp);
472 					up(&s_vpu_sem);
473 					return -EFAULT;
474 				}
475 
476 				ret = vpu_alloc_dma_buffer(&(vbp->vb));
477 				if (ret == -1) {
478 					ret = -ENOMEM;
479 					kfree(vbp);
480 					up(&s_vpu_sem);
481 					break;
482 				}
483 				ret = copy_to_user((void __user *)arg,
484 					&(vbp->vb),
485 					sizeof(struct vpudrv_buffer_t));
486 				if (ret) {
487 					kfree(vbp);
488 					ret = -EFAULT;
489 					up(&s_vpu_sem);
490 					break;
491 				}
492 
493 				vbp->filp = filp;
494 				spin_lock(&s_vpu_lock);
495 				list_add(&vbp->list, &s_vbp_head);
496 				spin_unlock(&s_vpu_lock);
497 
498 				up(&s_vpu_sem);
499 			}
500 			enc_pr(LOG_ALL,
501 				"[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
502 		}
503 		break;
504 #ifdef CONFIG_COMPAT
505 	case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32:
506 		{
507 			struct vpudrv_buffer_pool_t *vbp;
508 			struct compat_vpudrv_buffer_t buf32;
509 
510 			enc_pr(LOG_ALL,
511 				"[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
512 			ret = down_interruptible(&s_vpu_sem);
513 			if (ret == 0) {
514 				vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
515 				if (!vbp) {
516 					up(&s_vpu_sem);
517 					return -ENOMEM;
518 				}
519 
520 				ret = copy_from_user(&buf32,
521 					(struct compat_vpudrv_buffer_t *)arg,
522 					sizeof(struct compat_vpudrv_buffer_t));
523 				if (ret) {
524 					kfree(vbp);
525 					up(&s_vpu_sem);
526 					return -EFAULT;
527 				}
528 
529 				vbp->vb.size = buf32.size;
530 				vbp->vb.cached = buf32.cached;
531 				vbp->vb.phys_addr =
532 					(ulong)buf32.phys_addr;
533 				vbp->vb.virt_addr =
534 					(ulong)buf32.virt_addr;
535 				ret = vpu_alloc_dma_buffer(&(vbp->vb));
536 				if (ret == -1) {
537 					ret = -ENOMEM;
538 					kfree(vbp);
539 					up(&s_vpu_sem);
540 					break;
541 				}
542 
543 				buf32.size = vbp->vb.size;
544 				buf32.phys_addr =
545 					(compat_ulong_t)vbp->vb.phys_addr;
546 				buf32.virt_addr =
547 					(compat_ulong_t)vbp->vb.virt_addr;
548 
549 				ret = copy_to_user((void __user *)arg,
550 					&buf32,
551 					sizeof(struct compat_vpudrv_buffer_t));
552 				if (ret) {
553 					kfree(vbp);
554 					ret = -EFAULT;
555 					up(&s_vpu_sem);
556 					break;
557 				}
558 
559 				vbp->filp = filp;
560 				spin_lock(&s_vpu_lock);
561 				list_add(&vbp->list, &s_vbp_head);
562 				spin_unlock(&s_vpu_lock);
563 
564 				up(&s_vpu_sem);
565 			}
566 			enc_pr(LOG_ALL,
567 				"[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
568 		}
569 		break;
570 #endif
571 	case VDI_IOCTL_FREE_PHYSICALMEMORY:
572 		{
573 			struct vpudrv_buffer_pool_t *vbp, *n;
574 			struct vpudrv_buffer_t vb;
575 
576 			enc_pr(LOG_ALL,
577 				"[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
578 			ret = down_interruptible(&s_vpu_sem);
579 			if (ret == 0) {
580 				ret = copy_from_user(&vb,
581 					(struct vpudrv_buffer_t *)arg,
582 					sizeof(struct vpudrv_buffer_t));
583 				if (ret) {
584 					up(&s_vpu_sem);
585 					return -EACCES;
586 				}
587 
588 				if (vb.phys_addr)
589 					vpu_free_dma_buffer(&vb);
590 
591 				spin_lock(&s_vpu_lock);
592 				list_for_each_entry_safe(vbp, n,
593 					&s_vbp_head, list) {
594 					if (vbp->vb.phys_addr == vb.phys_addr) {
595 						list_del(&vbp->list);
596 						kfree(vbp);
597 						break;
598 					}
599 				}
600 				spin_unlock(&s_vpu_lock);
601 
602 				up(&s_vpu_sem);
603 			}
604 			enc_pr(LOG_ALL,
605 				"[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
606 		}
607 		break;
608 #ifdef CONFIG_COMPAT
609 	case VDI_IOCTL_FREE_PHYSICALMEMORY32:
610 		{
611 			struct vpudrv_buffer_pool_t *vbp, *n;
612 			struct compat_vpudrv_buffer_t buf32;
613 			struct vpudrv_buffer_t vb;
614 
615 			enc_pr(LOG_ALL,
616 				"[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
617 			ret = down_interruptible(&s_vpu_sem);
618 			if (ret == 0) {
619 				ret = copy_from_user(&buf32,
620 					(struct compat_vpudrv_buffer_t *)arg,
621 					sizeof(struct compat_vpudrv_buffer_t));
622 				if (ret) {
623 					up(&s_vpu_sem);
624 					return -EACCES;
625 				}
626 
627 				vb.size = buf32.size;
628 				vb.phys_addr =
629 					(ulong)buf32.phys_addr;
630 				vb.virt_addr =
631 					(ulong)buf32.virt_addr;
632 
633 				if (vb.phys_addr)
634 					vpu_free_dma_buffer(&vb);
635 
636 				spin_lock(&s_vpu_lock);
637 				list_for_each_entry_safe(vbp, n,
638 					&s_vbp_head, list) {
639 					if ((compat_ulong_t)vbp->vb.base
640 						== buf32.base) {
641 						list_del(&vbp->list);
642 						kfree(vbp);
643 						break;
644 					}
645 				}
646 				spin_unlock(&s_vpu_lock);
647 				up(&s_vpu_sem);
648 			}
649 			enc_pr(LOG_ALL,
650 				"[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
651 		}
652 		break;
653 #endif
654 	case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
655 		{
656 			enc_pr(LOG_ALL,
657 				"[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
658 			if (s_video_memory.phys_addr != 0) {
659 				ret = copy_to_user((void __user *)arg,
660 					&s_video_memory,
661 					sizeof(struct vpudrv_buffer_t));
662 				if (ret != 0)
663 					ret = -EFAULT;
664 			} else {
665 				ret = -EFAULT;
666 			}
667 			enc_pr(LOG_ALL,
668 				"[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
669 		}
670 		break;
671 #ifdef CONFIG_COMPAT
672 	case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32:
673 		{
674 			struct compat_vpudrv_buffer_t buf32;
675 
676 			enc_pr(LOG_ALL,
677 				"[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
678 
679 			buf32.size = s_video_memory.size;
680 			buf32.phys_addr =
681 				(compat_ulong_t)s_video_memory.phys_addr;
682 			buf32.virt_addr =
683 				(compat_ulong_t)s_video_memory.virt_addr;
684 			if (s_video_memory.phys_addr != 0) {
685 				ret = copy_to_user((void __user *)arg,
686 					&buf32,
687 					sizeof(struct compat_vpudrv_buffer_t));
688 				if (ret != 0)
689 					ret = -EFAULT;
690 			} else {
691 				ret = -EFAULT;
692 			}
693 			enc_pr(LOG_ALL,
694 				"[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
695 		}
696 		break;
697 #endif
698 	case VDI_IOCTL_WAIT_INTERRUPT:
699 		{
700 			struct vpudrv_intr_info_t info;
701 
702 			enc_pr(LOG_ALL,
703 				"[+]VDI_IOCTL_WAIT_INTERRUPT\n");
704 			ret = copy_from_user(&info,
705 				(struct vpudrv_intr_info_t *)arg,
706 				sizeof(struct vpudrv_intr_info_t));
707 			if (ret != 0)
708 				return -EFAULT;
709 
710 			ret = wait_event_interruptible_timeout(
711 				s_interrupt_wait_q,
712 				s_interrupt_flag != 0,
713 				msecs_to_jiffies(info.timeout));
714 			if (!ret) {
715 				ret = -ETIME;
716 				break;
717 			}
718 			enc_pr(LOG_INFO,
719 			       "s_interrupt_flag(%d), reason(0x%08lx)\n",
720 			       s_interrupt_flag, dev->interrupt_reason);
721 			if (dev->interrupt_reason & (1 << W4_INT_ENC_PIC)) {
722 				u32 start, end, size, core = 0;
723 
724 				start = ReadVpuRegister(W4_BS_RD_PTR);
725 				end = ReadVpuRegister(W4_BS_WR_PTR);
726 				size = ReadVpuRegister(W4_RET_ENC_PIC_BYTE);
727 				enc_pr(LOG_INFO, "flush output buffer, ");
728 				enc_pr(LOG_INFO,
729 					"start:0x%x, end:0x%x, size:0x%x\n",
730 					start, end, size);
731 				if (end - start > size && end > start)
732 					size = end - start;
733 				if (size > 0)
734 					cache_flush(start, size);
735 			}
736 
737 			if (signal_pending(current)) {
738 				ret = -ERESTARTSYS;
739 				break;
740 			}
741 
742 			enc_pr(LOG_INFO,
743 				"s_interrupt_flag(%d), reason(0x%08lx)\n",
744 				s_interrupt_flag, dev->interrupt_reason);
745 
746 			info.intr_reason = dev->interrupt_reason;
747 			s_interrupt_flag = 0;
748 			dev->interrupt_reason = 0;
749 			ret = copy_to_user((void __user *)arg,
750 				&info, sizeof(struct vpudrv_intr_info_t));
751 			enc_pr(LOG_ALL,
752 				"[-]VDI_IOCTL_WAIT_INTERRUPT\n");
753 			if (ret != 0)
754 				return -EFAULT;
755 		}
756 		break;
757 	case VDI_IOCTL_SET_CLOCK_GATE:
758 		{
759 			u32 clkgate;
760 
761 			enc_pr(LOG_ALL,
762 				"[+]VDI_IOCTL_SET_CLOCK_GATE\n");
763 			if (get_user(clkgate, (u32 __user *) arg))
764 				return -EFAULT;
765 #ifdef VPU_SUPPORT_CLOCK_CONTROL
766 			vpu_clk_config(clkgate);
767 #endif
768 			enc_pr(LOG_ALL,
769 				"[-]VDI_IOCTL_SET_CLOCK_GATE\n");
770 		}
771 		break;
772 	case VDI_IOCTL_GET_INSTANCE_POOL:
773 		{
774 			enc_pr(LOG_ALL,
775 				"[+]VDI_IOCTL_GET_INSTANCE_POOL\n");
776 			ret = down_interruptible(&s_vpu_sem);
777 			if (ret != 0)
778 				break;
779 
780 			if (s_instance_pool.base != 0) {
781 				ret = copy_to_user((void __user *)arg,
782 					&s_instance_pool,
783 					sizeof(struct vpudrv_buffer_t));
784 				ret = (ret != 0) ? -EFAULT : 0;
785 			} else {
786 				ret = copy_from_user(&s_instance_pool,
787 					(struct vpudrv_buffer_t *)arg,
788 					sizeof(struct vpudrv_buffer_t));
789 				if (ret == 0) {
790 					s_instance_pool.size =
791 						PAGE_ALIGN(
792 						s_instance_pool.size);
793 					s_instance_pool.base =
794 						(ulong)vmalloc(
795 						s_instance_pool.size);
796 					s_instance_pool.phys_addr =
797 						s_instance_pool.base;
798 					if (s_instance_pool.base == 0) {
799 						ret = -EFAULT;
800 						up(&s_vpu_sem);
801 						break;
802 					}
803 					/*clearing memory*/
804 					memset((void *)s_instance_pool.base,
805 						0, s_instance_pool.size);
806 					ret = copy_to_user((void __user *)arg,
807 						&s_instance_pool,
808 						sizeof(struct vpudrv_buffer_t));
809 					if (ret != 0)
810 						ret = -EFAULT;
811 				} else
812 					ret = -EFAULT;
813 			}
814 			up(&s_vpu_sem);
815 			enc_pr(LOG_ALL,
816 				"[-]VDI_IOCTL_GET_INSTANCE_POOL\n");
817 		}
818 		break;
819 #ifdef CONFIG_COMPAT
820 	case VDI_IOCTL_GET_INSTANCE_POOL32:
821 		{
822 			struct compat_vpudrv_buffer_t buf32;
823 
824 			enc_pr(LOG_ALL,
825 				"[+]VDI_IOCTL_GET_INSTANCE_POOL32\n");
826 			ret = down_interruptible(&s_vpu_sem);
827 			if (ret != 0)
828 				break;
829 			if (s_instance_pool.base != 0) {
830 				buf32.size = s_instance_pool.size;
831 				buf32.phys_addr =
832 					(compat_ulong_t)
833 					s_instance_pool.phys_addr;
834 				buf32.virt_addr =
835 					(compat_ulong_t)
836 					s_instance_pool.virt_addr;
837 				ret = copy_to_user((void __user *)arg,
838 					&buf32,
839 					sizeof(struct compat_vpudrv_buffer_t));
840 				ret = (ret != 0) ? -EFAULT : 0;
841 			} else {
842 				ret = copy_from_user(&buf32,
843 					(struct compat_vpudrv_buffer_t *)arg,
844 					sizeof(struct compat_vpudrv_buffer_t));
845 				if (ret == 0) {
846 					s_instance_pool.size = buf32.size;
847 					s_instance_pool.size =
848 						PAGE_ALIGN(
849 						s_instance_pool.size);
850 					s_instance_pool.base =
851 						(ulong)vmalloc(
852 						s_instance_pool.size);
853 					s_instance_pool.phys_addr =
854 						s_instance_pool.base;
855 					buf32.size =
856 						s_instance_pool.size;
857 					buf32.phys_addr =
858 						(compat_ulong_t)
859 						s_instance_pool.phys_addr;
860 					buf32.base =
861 						(compat_ulong_t)
862 						s_instance_pool.base;
863 					buf32.virt_addr =
864 						(compat_ulong_t)
865 						s_instance_pool.virt_addr;
866 					if (s_instance_pool.base == 0) {
867 						ret = -EFAULT;
868 						up(&s_vpu_sem);
869 						break;
870 					}
871 					/*clearing memory*/
872 					memset((void *)s_instance_pool.base,
873 						0x0, s_instance_pool.size);
874 					ret = copy_to_user((void __user *)arg,
875 						&buf32,
876 						sizeof(
877 						struct compat_vpudrv_buffer_t));
878 					if (ret != 0)
879 						ret = -EFAULT;
880 				} else
881 					ret = -EFAULT;
882 			}
883 			up(&s_vpu_sem);
884 			enc_pr(LOG_ALL,
885 				"[-]VDI_IOCTL_GET_INSTANCE_POOL32\n");
886 		}
887 		break;
888 #endif
889 	case VDI_IOCTL_GET_COMMON_MEMORY:
890 		{
891 			enc_pr(LOG_ALL,
892 				"[+]VDI_IOCTL_GET_COMMON_MEMORY\n");
893 			if (s_common_memory.phys_addr != 0) {
894 				ret = copy_to_user((void __user *)arg,
895 					&s_common_memory,
896 					sizeof(struct vpudrv_buffer_t));
897 				if (ret != 0)
898 					ret = -EFAULT;
899 			} else {
900 				ret = copy_from_user(&s_common_memory,
901 					(struct vpudrv_buffer_t *)arg,
902 					sizeof(struct vpudrv_buffer_t));
903 				if (ret != 0) {
904 					ret = -EFAULT;
905 					break;
906 				}
907 				if (vpu_alloc_dma_buffer(
908 					&s_common_memory) != -1) {
909 					ret = copy_to_user((void __user *)arg,
910 						&s_common_memory,
911 						sizeof(struct vpudrv_buffer_t));
912 					if (ret != 0)
913 						ret = -EFAULT;
914 				} else
915 					ret = -EFAULT;
916 			}
917 			enc_pr(LOG_ALL,
918 				"[-]VDI_IOCTL_GET_COMMON_MEMORY\n");
919 		}
920 		break;
921 #ifdef CONFIG_COMPAT
922 	case VDI_IOCTL_GET_COMMON_MEMORY32:
923 		{
924 			struct compat_vpudrv_buffer_t buf32;
925 
926 			enc_pr(LOG_ALL,
927 				"[+]VDI_IOCTL_GET_COMMON_MEMORY32\n");
928 
929 			buf32.size = s_common_memory.size;
930 			buf32.phys_addr =
931 				(compat_ulong_t)
932 				s_common_memory.phys_addr;
933 			buf32.virt_addr =
934 				(compat_ulong_t)
935 				s_common_memory.virt_addr;
936 			if (s_common_memory.phys_addr != 0) {
937 				ret = copy_to_user((void __user *)arg,
938 					&buf32,
939 					sizeof(struct compat_vpudrv_buffer_t));
940 				if (ret != 0)
941 					ret = -EFAULT;
942 			} else {
943 				ret = copy_from_user(&buf32,
944 					(struct compat_vpudrv_buffer_t *)arg,
945 					sizeof(struct compat_vpudrv_buffer_t));
946 				if (ret != 0) {
947 					ret = -EFAULT;
948 					break;
949 				}
950 				s_common_memory.size = buf32.size;
951 				if (vpu_alloc_dma_buffer(
952 					&s_common_memory) != -1) {
953 					buf32.size =
954 						s_common_memory.size;
955 					buf32.phys_addr =
956 						(compat_ulong_t)
957 						s_common_memory.phys_addr;
958 					buf32.virt_addr =
959 						(compat_ulong_t)
960 						s_common_memory.virt_addr;
961 					ret = copy_to_user((void __user *)arg,
962 						&buf32,
963 						sizeof(
964 						struct compat_vpudrv_buffer_t));
965 					if (ret != 0)
966 						ret = -EFAULT;
967 				} else
968 					ret = -EFAULT;
969 			}
970 			enc_pr(LOG_ALL,
971 				"[-]VDI_IOCTL_GET_COMMON_MEMORY32\n");
972 		}
973 		break;
974 #endif
975 	case VDI_IOCTL_OPEN_INSTANCE:
976 		{
977 			struct vpudrv_inst_info_t inst_info;
978 			struct vpudrv_instanace_list_t *vil, *n;
979 
980 			vil = kzalloc(sizeof(*vil), GFP_KERNEL);
981 			if (!vil)
982 				return -ENOMEM;
983 
984 			if (copy_from_user(&inst_info,
985 				(struct vpudrv_inst_info_t *)arg,
986 				sizeof(struct vpudrv_inst_info_t))) {
987 				kfree(vil);	/* don't leak vil on a failed copy */
				return -EFAULT;
			}
988 
989 			vil->inst_idx = inst_info.inst_idx;
990 			vil->core_idx = inst_info.core_idx;
991 			vil->filp = filp;
992 
993 			spin_lock(&s_vpu_lock);
994 			list_add(&vil->list, &s_inst_list_head);
995 
996 			/* counting the current open instance number */
997 			inst_info.inst_open_count = 0;
998 			list_for_each_entry_safe(vil, n,
999 				&s_inst_list_head, list) {
1000 				if (vil->core_idx == inst_info.core_idx)
1001 					inst_info.inst_open_count++;
1002 			}
1003 
1004 			/* just a flag showing whether the vpu is opened or closed */
1005 			s_vpu_open_ref_count++;
1006 			spin_unlock(&s_vpu_lock);
1007 
1008 			if (copy_to_user((void __user *)arg,
1009 				&inst_info,
1010 				sizeof(struct vpudrv_inst_info_t))) {
1011 				kfree(vil);
1012 				return -EFAULT;
1013 			}
1014 
1015 			enc_pr(LOG_DEBUG,
1016 				"VDI_IOCTL_OPEN_INSTANCE ");
1017 			enc_pr(LOG_DEBUG,
1018 				"core_idx=%d, inst_idx=%d, ",
1019 				(u32)inst_info.core_idx,
1020 				(u32)inst_info.inst_idx);
1021 			enc_pr(LOG_DEBUG,
1022 				"s_vpu_open_ref_count=%d, inst_open_count=%d\n",
1023 				s_vpu_open_ref_count,
1024 				inst_info.inst_open_count);
1025 		}
1026 		break;
1027 	case VDI_IOCTL_CLOSE_INSTANCE:
1028 		{
1029 			struct vpudrv_inst_info_t inst_info;
1030 			struct vpudrv_instanace_list_t *vil, *n;
1031 
1032 			enc_pr(LOG_ALL,
1033 				"[+]VDI_IOCTL_CLOSE_INSTANCE\n");
1034 			if (copy_from_user(&inst_info,
1035 				(struct vpudrv_inst_info_t *)arg,
1036 				sizeof(struct vpudrv_inst_info_t)))
1037 				return -EFAULT;
1038 
1039 			spin_lock(&s_vpu_lock);
1040 			list_for_each_entry_safe(vil, n,
1041 				&s_inst_list_head, list) {
1042 				if (vil->inst_idx == inst_info.inst_idx &&
1043 					vil->core_idx == inst_info.core_idx) {
1044 					list_del(&vil->list);
1045 					kfree(vil);
1046 					break;
1047 				}
1048 			}
1049 
1050 			/* counting the current open instance number */
1051 			inst_info.inst_open_count = 0;
1052 			list_for_each_entry_safe(vil, n,
1053 				&s_inst_list_head, list) {
1054 				if (vil->core_idx == inst_info.core_idx)
1055 					inst_info.inst_open_count++;
1056 			}
1057 
1058 			/* just a flag showing whether the vpu is opened or closed */
1059 			s_vpu_open_ref_count--;
1060 			spin_unlock(&s_vpu_lock);
1061 
1062 			if (copy_to_user((void __user *)arg,
1063 				&inst_info,
1064 				sizeof(struct vpudrv_inst_info_t)))
1065 				return -EFAULT;
1066 
1067 			enc_pr(LOG_DEBUG,
1068 				"VDI_IOCTL_CLOSE_INSTANCE ");
1069 			enc_pr(LOG_DEBUG,
1070 				"core_idx=%d, inst_idx=%d, ",
1071 				(u32)inst_info.core_idx,
1072 				(u32)inst_info.inst_idx);
1073 			enc_pr(LOG_DEBUG,
1074 				"s_vpu_open_ref_count=%d, inst_open_count=%d\n",
1075 				s_vpu_open_ref_count,
1076 				inst_info.inst_open_count);
1077 		}
1078 		break;
1079 	case VDI_IOCTL_GET_INSTANCE_NUM:
1080 		{
1081 			struct vpudrv_inst_info_t inst_info;
1082 			struct vpudrv_instanace_list_t *vil, *n;
1083 
1084 			enc_pr(LOG_ALL,
1085 				"[+]VDI_IOCTL_GET_INSTANCE_NUM\n");
1086 
1087 			ret = copy_from_user(&inst_info,
1088 				(struct vpudrv_inst_info_t *)arg,
1089 				sizeof(struct vpudrv_inst_info_t));
1090 			if (ret != 0)
1091 				break;
1092 
1093 			inst_info.inst_open_count = 0;
1094 
1095 			spin_lock(&s_vpu_lock);
1096 			list_for_each_entry_safe(vil, n,
1097 				&s_inst_list_head, list) {
1098 				if (vil->core_idx == inst_info.core_idx)
1099 					inst_info.inst_open_count++;
1100 			}
1101 			spin_unlock(&s_vpu_lock);
1102 
1103 			ret = copy_to_user((void __user *)arg,
1104 				&inst_info,
1105 				sizeof(struct vpudrv_inst_info_t));
1106 
1107 			enc_pr(LOG_DEBUG,
1108 				"VDI_IOCTL_GET_INSTANCE_NUM ");
1109 			enc_pr(LOG_DEBUG,
1110 				"core_idx=%d, inst_idx=%d, open_count=%d\n",
1111 				(u32)inst_info.core_idx,
1112 				(u32)inst_info.inst_idx,
1113 				inst_info.inst_open_count);
1114 		}
1115 		break;
1116 	case VDI_IOCTL_RESET:
1117 		{
1118 			vpu_hw_reset();
1119 		}
1120 		break;
1121 	case VDI_IOCTL_GET_REGISTER_INFO:
1122 		{
1123 			enc_pr(LOG_ALL,
1124 				"[+]VDI_IOCTL_GET_REGISTER_INFO\n");
1125 			ret = copy_to_user((void __user *)arg,
1126 				&s_vpu_register,
1127 				sizeof(struct vpudrv_buffer_t));
1128 			if (ret != 0)
1129 				ret = -EFAULT;
1130 			enc_pr(LOG_ALL,
1131 				"[-]VDI_IOCTL_GET_REGISTER_INFO ");
1132 			enc_pr(LOG_ALL,
1133 				"s_vpu_register.phys_addr=0x%lx, ",
1134 				s_vpu_register.phys_addr);
1135 			enc_pr(LOG_ALL,
1136 				"s_vpu_register.virt_addr=0x%lx, ",
1137 				s_vpu_register.virt_addr);
1138 			enc_pr(LOG_ALL,
1139 				"s_vpu_register.size=0x%x\n",
1140 				s_vpu_register.size);
1141 		}
1142 		break;
1143 #ifdef CONFIG_COMPAT
1144 	case VDI_IOCTL_GET_REGISTER_INFO32:
1145 		{
1146 			struct compat_vpudrv_buffer_t buf32;
1147 
1148 			enc_pr(LOG_ALL,
1149 				"[+]VDI_IOCTL_GET_REGISTER_INFO32\n");
1150 
1151 			buf32.size = s_vpu_register.size;
1152 			buf32.phys_addr =
1153 				(compat_ulong_t)
1154 				s_vpu_register.phys_addr;
1155 			buf32.virt_addr =
1156 				(compat_ulong_t)
1157 				s_vpu_register.virt_addr;
1158 			ret = copy_to_user((void __user *)arg,
1159 				&buf32,
1160 				sizeof(
1161 				struct compat_vpudrv_buffer_t));
1162 			if (ret != 0)
1163 				ret = -EFAULT;
1164 			enc_pr(LOG_ALL,
1165 				"[-]VDI_IOCTL_GET_REGISTER_INFO32 ");
1166 			enc_pr(LOG_ALL,
1167 				"s_vpu_register.phys_addr=0x%lx, ",
1168 				s_vpu_register.phys_addr);
1169 			enc_pr(LOG_ALL,
1170 				"s_vpu_register.virt_addr=0x%lx, ",
1171 				s_vpu_register.virt_addr);
1172 			enc_pr(LOG_ALL,
1173 				"s_vpu_register.size=0x%x\n",
1174 				s_vpu_register.size);
1175 		}
1176 		break;
1177 	case VDI_IOCTL_FLUSH_BUFFER32:
1178 		{
1179 			struct vpudrv_buffer_pool_t *pool, *n;
1180 			struct compat_vpudrv_buffer_t buf32;
1181 			struct vpudrv_buffer_t vb;
1182 			bool find = false;
1183 			u32 cached = 0;
1184 
1185 			enc_pr(LOG_ALL,
1186 				"[+]VDI_IOCTL_FLUSH_BUFFER32\n");
1187 
1188 			ret = copy_from_user(&buf32,
1189 				(struct compat_vpudrv_buffer_t *)arg,
1190 				sizeof(struct compat_vpudrv_buffer_t));
1191 			if (ret)
1192 				return -EFAULT;
1193 			spin_lock(&s_vpu_lock);
1194 			list_for_each_entry_safe(pool, n,
1195 				&s_vbp_head, list) {
1196 				if (pool->filp == filp) {
1197 					vb = pool->vb;
1198 					if (((compat_ulong_t)vb.phys_addr
1199 						== buf32.phys_addr)
1200 						&& find == false){
1201 						cached = vb.cached;
1202 						find = true;
1203 					}
1204 				}
1205 			}
1206 			spin_unlock(&s_vpu_lock);
1207 			if (find && cached)
1208 				dma_flush(
1209 					(u32)buf32.phys_addr,
1210 					(u32)buf32.size);
1211 			enc_pr(LOG_ALL,
1212 				"[-]VDI_IOCTL_FLUSH_BUFFER32\n");
1213 		}
1214 		break;
1215 #endif
1216 	case VDI_IOCTL_FLUSH_BUFFER:
1217 		{
1218 			struct vpudrv_buffer_pool_t *pool, *n;
1219 			struct vpudrv_buffer_t vb, buf;
1220 			bool find = false;
1221 			u32 cached = 0;
1222 
1223 			enc_pr(LOG_ALL,
1224 				"[+]VDI_IOCTL_FLUSH_BUFFER\n");
1225 
1226 			ret = copy_from_user(&buf,
1227 				(struct vpudrv_buffer_t *)arg,
1228 				sizeof(struct vpudrv_buffer_t));
1229 			if (ret)
1230 				return -EFAULT;
1231 			spin_lock(&s_vpu_lock);
1232 			list_for_each_entry_safe(pool, n,
1233 				&s_vbp_head, list) {
1234 				if (pool->filp == filp) {
1235 					vb = pool->vb;
1236 					if ((vb.phys_addr
1237 						== buf.phys_addr)
1238 						&& find == false){
1239 						cached = vb.cached;
1240 						find = true;
1241 					}
1242 				}
1243 			}
1244 			spin_unlock(&s_vpu_lock);
1245 			if (find && cached)
1246 				dma_flush(
1247 					(u32)buf.phys_addr,
1248 					(u32)buf.size);
1249 			enc_pr(LOG_ALL,
1250 				"[-]VDI_IOCTL_FLUSH_BUFFER\n");
1251 		}
1252 		break;
1253 	case VDI_IOCTL_CONFIG_DMA:
1254 		{
1255 			struct vpu_dma_buf_info_t dma_info;
1256 
1257 			enc_pr(LOG_ALL,
1258 				"[+]VDI_IOCTL_CONFIG_DMA\n");
1259 			if (copy_from_user(&dma_info,
1260 				(struct vpu_dma_buf_info_t *)arg,
1261 				sizeof(struct vpu_dma_buf_info_t)))
1262 				return -EFAULT;
1263 
1264 			if (vpu_src_addr_config(dma_info)) {
1265 				enc_pr(LOG_ERROR,
1266 					"src addr config error\n");
1267 				return -EFAULT;
1268 			}
1269 
1270 			enc_pr(LOG_ALL,
1271 				"[-]VDI_IOCTL_CONFIG_DMA %d, %d, %d\n",
1272 				dma_info.fd[0],
1273 				dma_info.fd[1],
1274 				dma_info.fd[2]);
1275 		}
1276 		break;
1277 	case VDI_IOCTL_UNMAP_DMA:
1278 		{
1279 			enc_pr(LOG_ALL,
1280 				"[+]VDI_IOCTL_UNMAP_DMA\n");
1281 
1282 			vpu_dma_buffer_unmap(&dma_cfg[0]);
1283 			if (dma_cfg[1].paddr != 0) {
1284 				vpu_dma_buffer_unmap(&dma_cfg[1]);
1285 			}
1286 			if (dma_cfg[2].paddr != 0) {
1287 				vpu_dma_buffer_unmap(&dma_cfg[2]);
1288 			}
1289 			enc_pr(LOG_ALL,
1290 				"[-]VDI_IOCTL_UNMAP_DMA\n");
1291 		}
1292 		break;
1293 	default:
1294 		{
1295 			enc_pr(LOG_ERROR,
1296 				"No such IOCTL, cmd is 0x%x\n", cmd);
1297 			ret = -EFAULT;
1298 		}
1299 		break;
1300 	}
1301 	return ret;
1302 }
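/*
 * Illustrative user-space sketch of the ioctl flow (not part of this
 * file; the device node is typically /dev/HevcEnc and the VDI_* numbers
 * come from the shared vpu.h UAPI header):
 *
 *	int fd = open("/dev/HevcEnc", O_RDWR);
 *	struct vpudrv_buffer_t regs;
 *	ioctl(fd, VDI_IOCTL_GET_REGISTER_INFO, &regs);
 *	void *io = mmap(NULL, regs.size + 1, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct vpudrv_intr_info_t intr = { .timeout = 100 };
 *	ioctl(fd, VDI_IOCTL_WAIT_INTERRUPT, &intr);
 */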
1303 
1304 #ifdef CONFIG_COMPAT
1305 static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg)
1306 {
1307 	long ret;
1308 
1309 	arg = (ulong)compat_ptr(arg);
1310 	ret = vpu_ioctl(filp, cmd, arg);
1311 	return ret;
1312 }
1313 #endif
1314 
1315 static ssize_t vpu_write(struct file *filp,
1316 	const char *buf,
1317 	size_t len,
1318 	loff_t *ppos)
1319 {
1320 	enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len);
1321 
1322 	if (!buf) {
1323 		enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n");
1324 		return -EFAULT;
1325 	}
1326 
1327 	if (len == sizeof(struct vpu_bit_firmware_info_t))	{
1328 		struct vpu_bit_firmware_info_t *bit_firmware_info;
1329 
1330 		bit_firmware_info =
1331 			kmalloc(sizeof(struct vpu_bit_firmware_info_t),
1332 			GFP_KERNEL);
1333 		if (!bit_firmware_info) {
1334 			enc_pr(LOG_ERROR,
1335 				"vpu_write bit_firmware_info allocation error\n");
1336 			return -EFAULT;
1337 		}
1338 
1339 		if (copy_from_user(bit_firmware_info, buf, len)) {
1340 			enc_pr(LOG_ERROR,
1341 				"vpu_write copy_from_user error for bit_firmware_info\n");
1342 			kfree(bit_firmware_info);	/* don't leak on a failed copy */
			return -EFAULT;
1343 		}
1344 
1345 		if (bit_firmware_info->size ==
1346 			sizeof(struct vpu_bit_firmware_info_t)) {
1347 			enc_pr(LOG_INFO,
1348 				"vpu_write set bit_firmware_info coreIdx=0x%x, ",
1349 				bit_firmware_info->core_idx);
1350 			enc_pr(LOG_INFO,
1351 				"reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n",
1352 				bit_firmware_info->reg_base_offset,
1353 				bit_firmware_info->size,
1354 				bit_firmware_info->bit_code[0]);
1355 
1356 			if (bit_firmware_info->core_idx
1357 				>= MAX_NUM_VPU_CORE) {	/* index must stay below the array size */
1358 				enc_pr(LOG_ERROR,
1359 					"vpu_write coreIdx[%d] ",
1360 					bit_firmware_info->core_idx);
1361 				enc_pr(LOG_ERROR,
1362 					"exceeds MAX_NUM_VPU_CORE[%d]\n",
1363 					MAX_NUM_VPU_CORE);
				kfree(bit_firmware_info);
1364 				return -ENODEV;
1365 			}
1366 
1367 			memcpy((void *)&s_bit_firmware_info
1368 				[bit_firmware_info->core_idx],
1369 				bit_firmware_info,
1370 				sizeof(struct vpu_bit_firmware_info_t));
1371 			kfree(bit_firmware_info);
1372 			return len;
1373 		}
1374 		kfree(bit_firmware_info);
1375 	}
1376 	return -EINVAL;	/* unexpected write length */
1377 }
1378 
1379 static s32 vpu_release(struct inode *inode, struct file *filp)
1380 {
1381 	s32 ret = 0;
1382 	ulong flags;
1383 
1384 	enc_pr(LOG_DEBUG, "vpu_release\n");
1385 	ret = down_interruptible(&s_vpu_sem);
1386 	if (ret == 0) {
1387 		vpu_free_buffers(filp);
1388 		vpu_free_instances(filp);
1389 		s_vpu_drv_context.open_count--;
1390 		if (s_vpu_drv_context.open_count == 0) {
1391 			enc_pr(LOG_INFO,
1392 			       "vpu_release: s_interrupt_flag(%d), reason(0x%08lx)\n",
1393 			       s_interrupt_flag, s_vpu_drv_context.interrupt_reason);
1394 			s_vpu_drv_context.interrupt_reason = 0;
1395 			s_interrupt_flag = 0;
1396 			if (s_instance_pool.base) {
1397 				enc_pr(LOG_DEBUG, "free instance pool\n");
1398 				vfree((const void *)s_instance_pool.base);
1399 				s_instance_pool.base = 0;
1400 			}
1401 			if (s_common_memory.phys_addr) {
1402 				enc_pr(LOG_INFO, "vpu_release, s_common_memory 0x%lx\n", s_common_memory.phys_addr);
1403 				vpu_free_dma_buffer(&s_common_memory);
1404 				s_common_memory.phys_addr = 0;
1405 			}
1406 
1407 			if (s_video_memory.phys_addr && !use_reserve) {
1408 				enc_pr(LOG_DEBUG, "vpu_release, s_video_memory 0x%lx\n", s_video_memory.phys_addr);
1409 				codec_mm_free_for_dma(
1410 					VPU_DEV_NAME,
1411 					(u32)s_video_memory.phys_addr);
1412 				vmem_exit(&s_vmem);
1413 				memset(&s_video_memory,
1414 					0, sizeof(struct vpudrv_buffer_t));
1415 				memset(&s_vmem,
1416 					0, sizeof(struct video_mm_t));
1417 			}
1418 			if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == true)) {
1419 				free_irq(s_vpu_irq, &s_vpu_drv_context);
1420 				s_vpu_irq_requested = false;
1421 			}
1422 			spin_lock_irqsave(&s_vpu_lock, flags);
1423 			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
1424 				READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
1425 				(get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
1426 				? 0x8 : (0x3<<12)));
1427 			udelay(10);
1428 
1429 			WRITE_VREG(DOS_MEM_PD_WAVE420L, 0xffffffff);
1430 #ifndef VPU_SUPPORT_CLOCK_CONTROL
1431 			vpu_clk_config(0);
1432 #endif
1433 			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
1434 				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
1435 				(get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
1436 				? 0x8 : (0x3<<24)));
1437 			udelay(10);
1438 			spin_unlock_irqrestore(&s_vpu_lock, flags);
1439 			amports_switch_gate("vdec", 0);
1440 		}
1441 		up(&s_vpu_sem);	/* release only when the down succeeded */
1442 	}
1443 	return 0;
1444 }
1445 
1446 static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode)
1447 {
1448 	struct vpu_drv_context_t *dev =
1449 		(struct vpu_drv_context_t *)filp->private_data;
1450 	return fasync_helper(fd, filp, mode, &dev->async_queue);
1451 }
1452 
1453 static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
1454 {
1455 	ulong pfn;
1456 
1457 	vm->vm_flags |= VM_IO | VM_RESERVED;
1458 	vm->vm_page_prot =
1459 		pgprot_noncached(vm->vm_page_prot);
1460 	pfn = s_vpu_register.phys_addr >> PAGE_SHIFT;
1461 	return remap_pfn_range(vm, vm->vm_start, pfn,
1462 		vm->vm_end - vm->vm_start,
1463 		vm->vm_page_prot) ? -EAGAIN : 0;
1464 }
1465 
1466 static s32 vpu_map_to_physical_memory(
1467 	struct file *fp, struct vm_area_struct *vm)
1468 {
1469 	vm->vm_flags |= VM_IO | VM_RESERVED;
1470 	if (vm->vm_pgoff ==
1471 		(s_common_memory.phys_addr >> PAGE_SHIFT)) {
1472 		vm->vm_page_prot =
1473 			pgprot_noncached(vm->vm_page_prot);
1474 	} else {
1475 		if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0)
1476 			vm->vm_page_prot =
1477 				pgprot_noncached(vm->vm_page_prot);
1478 	}
1479 	/* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */
1480 	if (!pfn_valid(vm->vm_pgoff)) {
1481 		enc_pr(LOG_ERROR, "%s invalid pfn\n", __func__);
1482 		return -EAGAIN;
1483 	}
1484 	return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
1485 		vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
1486 }
1487 
1488 static s32 vpu_map_to_instance_pool_memory(
1489 	struct file *fp, struct vm_area_struct *vm)
1490 {
1491 	s32 ret;
1492 	long length = vm->vm_end - vm->vm_start;
1493 	ulong start = vm->vm_start;
1494 	s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base;
1495 	ulong pfn;
1496 
1497 	vm->vm_flags |= VM_RESERVED;
1498 
1499 	/* loop over all pages, map it page individually */
1500 	while (length > 0) {
1501 		pfn = vmalloc_to_pfn(vmalloc_area_ptr);
1502 		ret = remap_pfn_range(vm, start, pfn,
1503 			PAGE_SIZE, PAGE_SHARED);
1504 		if (ret < 0)
1505 			return ret;
1506 		start += PAGE_SIZE;
1507 		vmalloc_area_ptr += PAGE_SIZE;
1508 		length -= PAGE_SIZE;
1509 	}
1510 	return 0;
1511 }
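/*
 * The page-by-page loop above is needed because the instance pool is
 * vmalloc()ed: virtually contiguous but physically scattered, so one
 * remap_pfn_range() over the whole range would map the wrong pages.
 * vmalloc_to_pfn() resolves each page individually instead.
 */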
1512 
1513 /*
1514  * @brief memory map interface for vpu file operation
1515  * @return 0 on success or negative error code on error
1516  */
1517 static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm)
1518 {
1519 	/* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */
1520 	if ((vm->vm_end - vm->vm_start == s_vpu_register.size + 1) &&
1521 						(vm->vm_pgoff == 0)) {
1522 		vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT);
1523 		return vpu_map_to_register(fp, vm);
1524 	}
1525 
1526 	if (vm->vm_pgoff == 0)
1527 		return vpu_map_to_instance_pool_memory(fp, vm);
1528 
1529 	return vpu_map_to_physical_memory(fp, vm);
1530 }
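/*
 * mmap dispatch, as implemented above: an offset-0 request whose length
 * is exactly register size + 1 maps the W4 register block; any other
 * offset-0 request maps the instance pool; everything else is treated
 * as a physical buffer offset (vm_pgoff == phys_addr >> PAGE_SHIFT).
 */
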
1531 static int vpu_dma_buffer_map(struct vpu_dma_cfg *cfg)
1532 {
1533 	int ret = -1;
1534 	int fd = -1;
1535 	struct dma_buf *dbuf = NULL;
1536 	struct dma_buf_attachment *d_att = NULL;
1537 	struct sg_table *sg = NULL;
1538 	void *vaddr = NULL;
1539 	struct device *dev = NULL;
1540 	enum dma_data_direction dir;
1541 
1542 	if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
1543 		enc_pr(LOG_ERROR, "error dma param\n");
1544 		return -EINVAL;
1545 	}
1546 	fd = cfg->fd;
1547 	dev = cfg->dev;
1548 	dir = cfg->dir;
1549 
1550 	dbuf = dma_buf_get(fd);
1551 	if (IS_ERR_OR_NULL(dbuf)) {	/* dma_buf_get() returns ERR_PTR, not NULL */
1552 		enc_pr(LOG_ERROR, "failed to get dma buffer, fd %d\n", fd);
1553 		return -EINVAL;
1554 	}
1555 
1556 	d_att = dma_buf_attach(dbuf, dev);
1557 	if (IS_ERR_OR_NULL(d_att)) {	/* dma_buf_attach() returns ERR_PTR on failure */
1558 		enc_pr(LOG_ERROR, "failed to set dma attach\n");
1559 		goto attach_err;
1560 	}
1561 
1562 	sg = dma_buf_map_attachment(d_att, dir);
1563 	if (IS_ERR_OR_NULL(sg)) {	/* dma_buf_map_attachment() returns ERR_PTR on failure */
1564 		enc_pr(LOG_ERROR, "failed to get dma sg\n");
1565 		goto map_attach_err;
1566 	}
1567 	cfg->dbuf = dbuf;
1568 	cfg->attach = d_att;
1569 	cfg->vaddr = vaddr;
1570 	cfg->sg = sg;
1571 
1572 	return 0;
1573 
1574 map_attach_err:
1575 	dma_buf_detach(dbuf, d_att);
1576 attach_err:
1577 	dma_buf_put(dbuf);
1578 
1579 	return ret;
1580 }
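/*
 * The import above follows the standard dma-buf sequence:
 * dma_buf_get(fd) -> dma_buf_attach(dbuf, dev) ->
 * dma_buf_map_attachment(attach, dir); vpu_dma_buffer_unmap() releases
 * the three in the reverse order.
 */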
1581 
1582 static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg)
1583 {
1584 	int fd = -1;
1585 	struct dma_buf *dbuf = NULL;
1586 	struct dma_buf_attachment *d_att = NULL;
1587 	struct sg_table *sg = NULL;
1588 	/*void *vaddr = NULL;*/
1589 	struct device *dev = NULL;
1590 	enum dma_data_direction dir;
1591 
1592 	if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
1593 			|| cfg->dbuf == NULL /*|| cfg->vaddr == NULL*/
1594 			|| cfg->attach == NULL || cfg->sg == NULL) {
1595 		enc_pr(LOG_ERROR, "unmap: Error dma param\n");
1596 		return;
1597 	}
1598 
1599 	fd = cfg->fd;
1600 	dev = cfg->dev;
1601 	dir = cfg->dir;
1602 	dbuf = cfg->dbuf;
1603 	d_att = cfg->attach;
1604 	sg = cfg->sg;
1605 
1606 	dma_buf_unmap_attachment(d_att, sg, dir);
1607 	dma_buf_detach(dbuf, d_att);
1608 	dma_buf_put(dbuf);
1609 
1610 	enc_pr(LOG_INFO, "vpu_dma_buffer_unmap fd %d\n", fd);
1611 }
1612 
1613 static int vpu_dma_buffer_get_phys(struct vpu_dma_cfg *cfg, unsigned long *addr)
1614 {
1615 	struct sg_table *sg_table;
1616 	struct page *page;
1617 	int ret;
1618 
1619 	ret = vpu_dma_buffer_map(cfg);
1620 	if (ret < 0) {
1621 		enc_pr(LOG_ERROR, "vpu_dma_buffer_map failed\n");
1622 		return ret;
1623 	}
1624 	if (cfg->sg) {
1625 		sg_table = cfg->sg;
1626 		page = sg_page(sg_table->sgl);
1627 		*addr = PFN_PHYS(page_to_pfn(page));
1628 		ret = 0;
1629 	}
1630 	enc_pr(LOG_INFO, "vpu_dma_buffer_get_phys\n");
1631 
1632 	return ret;
1633 }
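/*
 * Note: only the first scatterlist entry is translated above, so the
 * imported dma-buf must be physically contiguous (e.g. CMA-backed) for
 * the returned address to cover the whole plane.
 */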
1634 
1635 static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t info) {
1636 	unsigned long phy_addr_y = 0;
1637 	unsigned long phy_addr_u = 0;
1638 	unsigned long phy_addr_v = 0;
1639 	unsigned long Ysize = info.width * info.height;
1640 	unsigned long Usize = Ysize >> 2;
1641 	s32 ret = 0;
1642 	u32 core = 0;
1643 
1644 	//y
1645 	dma_cfg[0].dir = DMA_TO_DEVICE;
1646 	dma_cfg[0].fd = info.fd[0];
1647 	dma_cfg[0].dev = &(hevc_pdev->dev);
1648 	ret = vpu_dma_buffer_get_phys(&dma_cfg[0], &phy_addr_y);
1649 	if (ret < 0) {
1650 		enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[0]);
1651 		return -1;
1652 	}
1653 
1654 	//u
1655 	if (info.num_planes >= 2) {
1656 		dma_cfg[1].dir = DMA_TO_DEVICE;
1657 		dma_cfg[1].fd = info.fd[1];
1658 		dma_cfg[1].dev = &(hevc_pdev->dev);
1659 		ret = vpu_dma_buffer_get_phys(&dma_cfg[1], &phy_addr_u);
1660 		if (ret < 0) {
1661 			enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[1]);
1662 			return -1;
1663 		}
1664 	}
1665 
1666 	//v
1667 	if (info.num_planes >= 3) {
1668 		dma_cfg[2].dir = DMA_TO_DEVICE;
1669 		dma_cfg[2].fd = info.fd[2];
1670 		dma_cfg[2].dev = &(hevc_pdev->dev);
1671 		ret = vpu_dma_buffer_get_phys(&dma_cfg[2], &phy_addr_v);
1672 		if (ret < 0) {
1673 			enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[2]);
1674 			return -1;
1675 		}
1676 	}
1677 
1678 	enc_pr(LOG_INFO, "vpu_src_addr_config phy_addr 0x%lx, 0x%lx, 0x%lx\n",
1679 		phy_addr_y, phy_addr_u, phy_addr_v);
1680 
1681 	dma_cfg[0].paddr = (void *)phy_addr_y;
1682 	dma_cfg[1].paddr = (void *)phy_addr_u;
1683 	dma_cfg[2].paddr = (void *)phy_addr_v;
1684 
1685 	enc_pr(LOG_INFO, "info.num_planes %d, info.fmt %d\n",
1686 		info.num_planes, info.fmt);
1687 
1688 	WriteVpuRegister(W4_SRC_ADDR_Y, phy_addr_y);
1689 	if (info.num_planes == 1) {
1690 		if (info.fmt == AMVENC_YUV420) {
1691 			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
1692 			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_y + Ysize + Usize);
1693 		} else if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21) {
1694 			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
1695 			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_y + Ysize);
1696 		} else {
1697 			enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
1698 		}
1699 
1700 	} else if (info.num_planes == 2) {
1701 		if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21) {
1702 			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
1703 			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_u);
1704 		} else {
1705 			enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
1706 		}
1707 
1708 	} else if (info.num_planes == 3) {
1709 		if (info.fmt == AMVENC_YUV420) {
1710 			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
1711 			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_v);
1712 		} else {
1713 			enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
1714 		}
1715 	}
1716 	return 0;
1717 
1718 }
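/*
 * Plane math used above, assuming 4:2:0 subsampling: Ysize = width *
 * height and Usize = Ysize / 4, so a one-plane YUV420 buffer lays out as
 * Y at +0, U at +Ysize, V at +Ysize + Usize, while NV12/NV21 keep their
 * interleaved UV plane at +Ysize.
 */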
1719 
1720 static const struct file_operations vpu_fops = {
1721 	.owner = THIS_MODULE,
1722 	.open = vpu_open,
1723 	.release = vpu_release,
1724 	.write = vpu_write,
1725 	.unlocked_ioctl = vpu_ioctl,
1726 #ifdef CONFIG_COMPAT
1727 	.compat_ioctl = vpu_compat_ioctl,
1728 #endif
1729 	.fasync = vpu_fasync,
1730 	.mmap = vpu_mmap,
1731 };
1732 
1733 static ssize_t hevcenc_status_show(struct class *cla,
1734 				  struct class_attribute *attr, char *buf)
1735 {
1736 	return snprintf(buf, 40, "hevcenc_status_show\n");
1737 }
1738 
1739 static struct class_attribute hevcenc_class_attrs[] = {
1740 	__ATTR(encode_status,
1741 	S_IRUGO,	/* no store handler, so don't advertise write permission */
1742 	hevcenc_status_show,
1743 	NULL),
1744 	__ATTR_NULL
1745 };
1746 
1747 static struct class hevcenc_class = {
1748 	.name = VPU_CLASS_NAME,
1749 	.class_attrs = hevcenc_class_attrs,
1750 };
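/*
 * Reading the resulting sysfs node exercises hevcenc_status_show(); the
 * path is derived from VPU_CLASS_NAME, e.g.:
 *
 *	$ cat /sys/class/HevcEnc/encode_status
 *	hevcenc_status_show
 */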
1751 
1752 s32 init_HevcEnc_device(void)
1753 {
1754 	s32  r = 0;
1755 
1756 	r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops);
1757 	if (r <= 0) {
1758 		enc_pr(LOG_ERROR, "register hevcenc device error.\n");
1759 		return  r;
1760 	}
1761 	s_vpu_major = r;
1762 
1763 	r = class_register(&hevcenc_class);
1764 	if (r < 0) {
1765 		enc_pr(LOG_ERROR, "failed to create hevcenc class.\n");
		unregister_chrdev(s_vpu_major, VPU_DEV_NAME);	/* undo the chrdev registration */
1766 		return r;
1767 	}
1768 
1769 	hevcenc_dev = device_create(&hevcenc_class, NULL,
1770 				       MKDEV(s_vpu_major, 0), NULL,
1771 				       VPU_DEV_NAME);
1772 
1773 	if (IS_ERR(hevcenc_dev)) {
1774 		enc_pr(LOG_ERROR, "create hevcenc device error.\n");
1775 		class_unregister(&hevcenc_class);
		unregister_chrdev(s_vpu_major, VPU_DEV_NAME);	/* undo the chrdev registration */
1776 		return -1;
1777 	}
1778 	return r;
1779 }
1780 
1781 s32 uninit_HevcEnc_device(void)
1782 {
1783 	if (hevcenc_dev)
1784 		device_destroy(&hevcenc_class, MKDEV(s_vpu_major, 0));
1785 
1786 	class_destroy(&hevcenc_class);
1787 
1788 	unregister_chrdev(s_vpu_major, VPU_DEV_NAME);
1789 	return 0;
1790 }
1791 
1792 static s32 hevc_mem_device_init(
1793 	struct reserved_mem *rmem, struct device *dev)
1794 {
1795 	s32 r;
1796 
1797 	if (!rmem) {
1798 		enc_pr(LOG_ERROR,
1799 			"Cannot obtain reserved memory, will allocate hevc buffer!\n");
1800 		r = -EFAULT;
1801 		return r;
1802 	}
1803 
1804 	if ((!rmem->base) ||
1805 		(rmem->size < VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) {
1806 		enc_pr(LOG_ERROR,
1807 			"memory range error, 0x%lx - 0x%lx\n",
1808 			 (ulong)rmem->base, (ulong)rmem->size);
1809 		r = -EFAULT;
1810 		return r;
1811 	}
1812 	r = 0;
1813 	s_video_memory.size = rmem->size;
1814 	s_video_memory.phys_addr = (ulong)rmem->base;
1815 	enc_pr(LOG_DEBUG, "hevc_mem_device_init %d, 0x%lx\n", s_video_memory.size, s_video_memory.phys_addr);
1816 
1817 	return r;
1818 }
1819 
1820 static s32 vpu_probe(struct platform_device *pdev)
1821 {
1822 	s32 err = 0, irq, reg_count, idx;
1823 	struct resource res;
1824 	struct device_node *np, *child;
1825 
1826 	enc_pr(LOG_DEBUG, "vpu_probe\n");
1827 
1828 	s_vpu_major = 0;
1829 	use_reserve = false;
1830 	s_vpu_irq = -1;
1831 	cma_pool_size = 0;
1832 	s_vpu_irq_requested = false;
1833 	s_vpu_open_ref_count = 0;
1834 	hevcenc_dev = NULL;
1835 	hevc_pdev = NULL;
1836 	memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
1837 	memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
1838 	memset(&s_vmem, 0, sizeof(struct video_mm_t));
1839 	memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info));
1840 	memset(&res, 0, sizeof(struct resource));
1841 
1842 	idx = of_reserved_mem_device_init(&pdev->dev);
1843 	if (idx != 0) {
1844 		enc_pr(LOG_DEBUG,
1845 			"HevcEnc reserved memory config failed.\n");
1846 	} else if (s_video_memory.phys_addr) {
1847 		use_reserve = true;
1848 	}
1849 
1850 	if (use_reserve == false) {
1851 #ifndef CONFIG_CMA
1852 		enc_pr(LOG_ERROR,
1853 			"HevcEnc reserved memory is invalid, probe failed!\n");
1854 		err = -EFAULT;
1855 		goto ERROR_PROVE_DEVICE;
1856 #else
1857 		cma_pool_size =
1858 			(codec_mm_get_total_size() >
1859 			(VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) ?
1860 			(VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE) :
1861 			codec_mm_get_total_size();
1862 		enc_pr(LOG_DEBUG,
1863 			"HevcEnc - cma memory pool size: %d MB\n",
1864 			(u32)cma_pool_size / SZ_1M);
1865 #endif
1866 	}
1867 
1868 	/* get interrupt resource */
1869 	irq = platform_get_irq_byname(pdev, "wave420l_irq");
1870 	if (irq < 0) {
1871 		enc_pr(LOG_ERROR, "get HevcEnc irq resource error\n");
1872 		err = -ENXIO;
1873 		goto ERROR_PROVE_DEVICE;
1874 	}
1875 	s_vpu_irq = irq;
1876 	enc_pr(LOG_DEBUG, "HevcEnc - wave420l_irq: %d\n", s_vpu_irq);
1877 #if 0
1878 	rstc = devm_reset_control_get(&pdev->dev, "HevcEnc");
1879 	if (IS_ERR(rstc)) {
1880 		enc_pr(LOG_ERROR,
1881 			"get HevcEnc rstc error: %lx\n", PTR_ERR(rstc));
1882 		rstc = NULL;
1883 		err = -ENOENT;
1884 		goto ERROR_PROVE_DEVICE;
1885 	}
1886 	reset_control_assert(rstc);
1887 	s_vpu_rstc = rstc;
1888 
1889 	clk = clk_get(&pdev->dev, "clk_HevcEnc");
1890 	if (IS_ERR(clk)) {
1891 		enc_pr(LOG_ERROR, "cannot get clock\n");
1892 		clk = NULL;
1893 		err = -ENOENT;
1894 		goto ERROR_PROVE_DEVICE;
1895 	}
1896 	s_vpu_clk = clk;
1897 #endif
1898 
1899 #ifndef VPU_SUPPORT_CLOCK_CONTROL
1901 	vpu_clk_config(1);
1902 #endif
1903 
	np = pdev->dev.of_node;
	reg_count = 0;
	for_each_child_of_node(np, child) {
		if (of_address_to_resource(child, 0, &res)
			|| (reg_count > 1)) {
			enc_pr(LOG_ERROR,
				"missing reg range or too many reg ranges %d\n",
				reg_count);
			err = -ENXIO;
			goto ERROR_PROBE_DEVICE;
		}
		/* if platform driver is implemented */
		if (res.start != 0) {
			s_vpu_register.phys_addr = res.start;
			s_vpu_register.virt_addr =
				(ulong)ioremap_nocache(
				res.start, resource_size(&res));
			/* resource_size() covers the last byte (end - start + 1) */
			s_vpu_register.size = resource_size(&res);
			enc_pr(LOG_DEBUG,
				"vpu base address from platform driver ");
			enc_pr(LOG_DEBUG,
				"physical base addr=0x%lx, virtual base=0x%lx\n",
				s_vpu_register.phys_addr,
				s_vpu_register.virt_addr);
		} else {
			s_vpu_register.phys_addr = VPU_REG_BASE_ADDR;
			s_vpu_register.virt_addr =
				(ulong)ioremap_nocache(
				s_vpu_register.phys_addr, VPU_REG_SIZE);
			s_vpu_register.size = VPU_REG_SIZE;
			enc_pr(LOG_DEBUG,
				"vpu base address from predefined value ");
			enc_pr(LOG_DEBUG,
				"physical base addr=0x%lx, virtual base=0x%lx\n",
				s_vpu_register.phys_addr,
				s_vpu_register.virt_addr);
		}
		reg_count++;
	}

	/* get the major number of the character device */
	if (init_HevcEnc_device()) {
		err = -EBUSY;
		enc_pr(LOG_ERROR, "could not allocate major number\n");
		goto ERROR_PROBE_DEVICE;
	}
	enc_pr(LOG_INFO, "SUCCESS alloc_chrdev_region\n");

	init_waitqueue_head(&s_interrupt_wait_q);
	tasklet_init(&hevc_tasklet,
		hevcenc_isr_tasklet,
		(ulong)&s_vpu_drv_context);
	s_common_memory.base = 0;
	s_instance_pool.base = 0;

	if (use_reserve == true) {
		if (vmem_init(&s_vmem, s_video_memory.phys_addr,
			s_video_memory.size) < 0) {
			enc_pr(LOG_ERROR, "fail to init vmem system\n");
			/* do not return 0 from the error path */
			err = -EFAULT;
			goto ERROR_PROBE_DEVICE;
		}
		enc_pr(LOG_DEBUG,
			"successfully probed vpu device with reserved video memory ");
		enc_pr(LOG_DEBUG,
			"phys_addr=0x%lx, base = 0x%lx\n",
			(ulong)s_video_memory.phys_addr,
			(ulong)s_video_memory.base);
	} else {
		enc_pr(LOG_DEBUG,
			"successfully probed vpu device with video memory from cma\n");
	}
	hevc_pdev = pdev;
	return 0;

ERROR_PROBE_DEVICE:
	if (s_vpu_register.virt_addr) {
		iounmap((void *)s_vpu_register.virt_addr);
		memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
	}

	if (s_video_memory.phys_addr) {
		vmem_exit(&s_vmem);
		memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
		memset(&s_vmem, 0, sizeof(struct video_mm_t));
	}

	vpu_clk_config(0);

	if (s_vpu_irq_requested == true) {
		if (s_vpu_irq >= 0) {
			free_irq(s_vpu_irq, &s_vpu_drv_context);
			s_vpu_irq = -1;
		}
		s_vpu_irq_requested = false;
	}
	uninit_HevcEnc_device();
	return err;
}
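
/*
 * For illustration only: a device node of the shape vpu_probe() parses.
 * This is a sketch, not a node from any real board file: the node names,
 * unit addresses, register range and interrupt specifier are invented,
 * while the compatible string, the "wave420l_irq" interrupt name and the
 * child node carrying the reg range are the pieces the probe code above
 * actually looks up:
 *
 *	HevcEnc: hevcenc@c8820000 {
 *		compatible = "cnm, HevcEnc";
 *		interrupts = <0 187 1>;
 *		interrupt-names = "wave420l_irq";
 *		hevcenc_reg_block {
 *			reg = <0x0 0xc8820000 0x0 0x10000>;
 *		};
 *	};
 */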

static s32 vpu_remove(struct platform_device *pdev)
{
	enc_pr(LOG_DEBUG, "vpu_remove\n");

	if (s_instance_pool.base) {
		vfree((const void *)s_instance_pool.base);
		s_instance_pool.base = 0;
	}

	if (s_common_memory.phys_addr) {
		vpu_free_dma_buffer(&s_common_memory);
		s_common_memory.phys_addr = 0;
	}

	if (s_video_memory.phys_addr) {
		if (!use_reserve) {
			codec_mm_free_for_dma(
				VPU_DEV_NAME,
				(u32)s_video_memory.phys_addr);
		}
		vmem_exit(&s_vmem);
		memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
		memset(&s_vmem, 0, sizeof(struct video_mm_t));
	}

	if (s_vpu_irq_requested == true) {
		if (s_vpu_irq >= 0) {
			free_irq(s_vpu_irq, &s_vpu_drv_context);
			s_vpu_irq = -1;
		}
		s_vpu_irq_requested = false;
	}

	if (s_vpu_register.virt_addr) {
		iounmap((void *)s_vpu_register.virt_addr);
		memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
	}
	hevc_pdev = NULL;
	vpu_clk_config(0);

	uninit_HevcEnc_device();
	return 0;
}

#ifdef CONFIG_PM
static void Wave4BitIssueCommand(u32 core, u32 cmd)
{
	/* note: the core argument is unused; core 0 is always selected */
	WriteVpuRegister(W4_VPU_BUSY_STATUS, 1);
	WriteVpuRegister(W4_CORE_INDEX, 0);
	/* coreIdx = ReadVpuRegister(W4_VPU_BUSY_STATUS); */
	/* coreIdx = 0; */
	/* WriteVpuRegister(W4_INST_INDEX,
	 *	(instanceIndex & 0xffff) | (codecMode << 16));
	 */
	WriteVpuRegister(W4_COMMAND, cmd);
	WriteVpuRegister(W4_VPU_HOST_INT_REQ, 1);
}
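
/*
 * Command handshake used by the suspend/resume paths below: mark the VPU
 * busy, select core 0, write the command word, then raise the host
 * interrupt request so the firmware fetches the command. Callers poll
 * W4_VPU_BUSY_STATUS until it clears and then check W4_RET_SUCCESS /
 * W4_RET_FAIL_REASON for the outcome, e.g.:
 *
 *	Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU);
 *	while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
 *		if (time_after(jiffies, timeout))
 *			goto DONE_SUSPEND;
 *	}
 *	if (ReadVpuRegister(W4_RET_SUCCESS) == 0)
 *		goto DONE_SUSPEND;
 */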

static s32 vpu_suspend(struct platform_device *pdev, pm_message_t state)
{
	u32 core;
	ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */

	enc_pr(LOG_DEBUG, "vpu_suspend\n");

	vpu_clk_config(1);

	if (s_vpu_open_ref_count > 0) {
		for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
			if (s_bit_firmware_info[core].size == 0)
				continue;
			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout)) {
					enc_pr(LOG_ERROR,
						"SLEEP_VPU BUSY timeout\n");
					goto DONE_SUSPEND;
				}
			}
			Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU);

			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout)) {
					enc_pr(LOG_ERROR,
						"SLEEP_VPU BUSY timeout\n");
					goto DONE_SUSPEND;
				}
			}
			if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
				enc_pr(LOG_ERROR,
					"SLEEP_VPU failed [0x%x]\n",
					ReadVpuRegister(W4_RET_FAIL_REASON));
				goto DONE_SUSPEND;
			}
		}
	}

	vpu_clk_config(0);
	return 0;

DONE_SUSPEND:
	vpu_clk_config(0);
	return -EAGAIN;
}

static s32 vpu_resume(struct platform_device *pdev)
{
	u32 i;
	u32 core;
	u32 val;
	ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */
	ulong code_base;
	u32 code_size;
	u32 remap_size;
	u32 regVal;
	u32 hwOption = 0;

	enc_pr(LOG_DEBUG, "vpu_resume\n");

	vpu_clk_config(1);
	if (s_vpu_open_ref_count > 0) {
		for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
			if (s_bit_firmware_info[core].size == 0)
				continue;
			code_base = s_common_memory.phys_addr;
			/* ALIGN TO 4KB */
			code_size = (s_common_memory.size & ~0xfff);
			if (code_size < s_bit_firmware_info[core].size * 2)
				goto DONE_WAKEUP;

			/*---- LOAD BOOT CODE */
			/* two 16-bit code words are packed per 32-bit write */
			for (i = 0; i < 512; i += 2) {
				val = s_bit_firmware_info[core].bit_code[i];
				val |= (s_bit_firmware_info[core].bit_code[i + 1] << 16);
				WriteVpu(code_base + (i * 2), val);
			}

			regVal = 0;
			WriteVpuRegister(W4_PO_CONF, regVal);

			/* Reset all blocks */
			regVal = 0x7ffffff;
			WriteVpuRegister(W4_VPU_RESET_REQ, regVal);

			/* Wait until the reset is done */
			while (ReadVpuRegister(W4_VPU_RESET_STATUS)) {
				if (time_after(jiffies, timeout))
					goto DONE_WAKEUP;
			}

			WriteVpuRegister(W4_VPU_RESET_REQ, 0);

			/* remap page size: the code region in 4 KB pages */
			remap_size = (code_size >> 12) & 0x1ff;
			regVal = 0x80000000 | (W4_REMAP_CODE_INDEX << 12)
				| (0 << 16) | (1 << 11) | remap_size;
			WriteVpuRegister(W4_VPU_REMAP_CTRL, regVal);
			/* DO NOT CHANGE! */
			WriteVpuRegister(W4_VPU_REMAP_VADDR, 0x00000000);
			WriteVpuRegister(W4_VPU_REMAP_PADDR, code_base);
			WriteVpuRegister(W4_ADDR_CODE_BASE, code_base);
			WriteVpuRegister(W4_CODE_SIZE, code_size);
			WriteVpuRegister(W4_CODE_PARAM, 0);
			WriteVpuRegister(W4_INIT_VPU_TIME_OUT_CNT, timeout);
			WriteVpuRegister(W4_HW_OPTION, hwOption);

			/* Interrupt enables; note the blanket mask below
			 * supersedes the individual bits ORed together above
			 */
			regVal = (1 << W4_INT_DEC_PIC_HDR);
			regVal |= (1 << W4_INT_DEC_PIC);
			regVal |= (1 << W4_INT_QUERY_DEC);
			regVal |= (1 << W4_INT_SLEEP_VPU);
			regVal |= (1 << W4_INT_BSBUF_EMPTY);
			regVal = 0xfffffefe;
			WriteVpuRegister(W4_VPU_VINT_ENABLE, regVal);
			Wave4BitIssueCommand(core, W4_CMD_INIT_VPU);
			WriteVpuRegister(W4_VPU_REMAP_CORE_START, 1);
			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout))
					goto DONE_WAKEUP;
			}

			if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
				enc_pr(LOG_ERROR,
					"WAKEUP_VPU failed [0x%x]\n",
					ReadVpuRegister(W4_RET_FAIL_REASON));
				goto DONE_WAKEUP;
			}
		}
	}

	if (s_vpu_open_ref_count == 0)
		vpu_clk_config(0);
DONE_WAKEUP:
	if (s_vpu_open_ref_count > 0)
		vpu_clk_config(1);
	return 0;
}
#else
#define vpu_suspend NULL
#define vpu_resume NULL
#endif /* !CONFIG_PM */

static const struct of_device_id cnm_hevcenc_dt_match[] = {
	{
		.compatible = "cnm, HevcEnc",
	},
	{},
};

static struct platform_driver vpu_driver = {
	.driver = {
		.name = VPU_PLATFORM_DEVICE_NAME,
		.of_match_table = cnm_hevcenc_dt_match,
	},
	.probe = vpu_probe,
	.remove = vpu_remove,
	.suspend = vpu_suspend,
	.resume = vpu_resume,
};
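
/*
 * These are the legacy platform-bus suspend/resume callbacks rather than
 * dev_pm_ops; when CONFIG_PM is disabled they resolve to the NULL
 * defines above and the bus simply skips them.
 */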

static s32 __init vpu_init(void)
{
	s32 res;

	enc_pr(LOG_DEBUG, "vpu_init\n");

	if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_G12A)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_G12B)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_SM1)) {
		enc_pr(LOG_DEBUG,
			"The chip does not support hevc encoder\n");
		return -1;
	}
	if (get_cpu_type() == MESON_CPU_MAJOR_ID_G12A) {
		if ((READ_EFUSE_REG(EFUSE_LIC2) >> 12) & 1) {
			enc_pr(LOG_DEBUG,
				"Chip efuse disabled H265\n");
			return -1;
		}
	}

	res = platform_driver_register(&vpu_driver);
	enc_pr(LOG_INFO,
		"end vpu_init result=0x%x\n", res);
	return res;
}

static void __exit vpu_exit(void)
{
	enc_pr(LOG_DEBUG, "vpu_exit\n");
	if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_G12A) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_G12B) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_SM1)) {
		enc_pr(LOG_INFO,
			"The chip does not support hevc encoder\n");
		return;
	}
	platform_driver_unregister(&vpu_driver);
}

static const struct reserved_mem_ops rmem_hevc_ops = {
	.device_init = hevc_mem_device_init,
};

static s32 __init hevc_mem_setup(struct reserved_mem *rmem)
{
	rmem->ops = &rmem_hevc_ops;
	enc_pr(LOG_DEBUG, "HevcEnc reserved mem setup.\n");
	return 0;
}

module_param(print_level, int, 0664);
MODULE_PARM_DESC(print_level, "print_level");

module_param(clock_level, int, 0664);
MODULE_PARM_DESC(clock_level, "clock_level");
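
/*
 * Usage sketch (the module file name is assumed for illustration):
 *
 *	insmod vpu.ko print_level=3 clock_level=4
 *
 * With the 0664 permissions above, both parameters can also be changed
 * at runtime via /sys/module/<module>/parameters/print_level and
 * /sys/module/<module>/parameters/clock_level.
 */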

MODULE_AUTHOR("Amlogic using C&M VPU, Inc.");
MODULE_DESCRIPTION("VPU linux driver");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);
RESERVEDMEM_OF_DECLARE(cnm_hevc, "cnm, HevcEnc-memory", hevc_mem_setup);