1 /*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17 #define DEBUG
18 #include <linux/kernel.h>
19 #include <linux/spinlock.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 #include <linux/platform_device.h>
25 #include <linux/uaccess.h>
26 #include <linux/semaphore.h>
27 #include <uapi/linux/sched/types.h>
28 #include <linux/sched.h>
29 #include <linux/sched/rt.h>
30 #include <linux/interrupt.h>
31 #include <linux/amlogic/media/utils/vformat.h>
32 #include <linux/amlogic/iomap.h>
33 #include <linux/amlogic/media/canvas/canvas.h>
34 #include <linux/amlogic/media/vfm/vframe.h>
35 #include <linux/amlogic/media/vfm/vframe_provider.h>
36 #include <linux/amlogic/media/vfm/vframe_receiver.h>
37 #include <linux/amlogic/media/video_sink/ionvideo_ext.h>
38 #ifdef CONFIG_AMLOGIC_V4L_VIDEO3
39 #include <linux/amlogic/media/video_sink/v4lvideo_ext.h>
40 #endif
41 #include <linux/amlogic/media/vfm/vfm_ext.h>
42 /*for VDEC_DEBUG_SUPPORT*/
43 #include <linux/time.h>
44
45 #include <linux/amlogic/media/utils/vdec_reg.h>
46 #include "../../../stream_input/amports/streambuf.h"
47 #include "vdec.h"
48 #include "vdec_trace.h"
49 #ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
50 #include "vdec_profile.h"
51 #endif
52 #include <linux/of.h>
53 #include <linux/of_fdt.h>
54 #include <linux/libfdt_env.h>
55 #include <linux/of_reserved_mem.h>
56 #include <linux/dma-map-ops.h>
57 #include <linux/cma.h>
58 #include <linux/module.h>
59 #include <linux/slab.h>
60 #include <linux/dma-mapping.h>
61 #include "../../../stream_input/amports/amports_priv.h"
62
63 #include <linux/amlogic/media/utils/amports_config.h>
64 #include "../utils/amvdec.h"
65 #include "vdec_input.h"
66
67 #include "../../../common/media_clock/clk/clk.h"
68 #include <linux/reset.h>
69 #include <linux/amlogic/cpu_version.h>
70 #include <linux/amlogic/media/codec_mm/codec_mm.h>
71 #include <linux/amlogic/media/video_sink/video_keeper.h>
72 #include <linux/amlogic/media/codec_mm/configs.h>
73 #include <linux/amlogic/media/frame_sync/ptsserv.h>
74 #include "secprot.h"
75 #include "../../../common/chips/decoder_cpu_ver_info.h"
76 #include "frame_check.h"
77
78 #ifdef CONFIG_AMLOGIC_POWER
79 #include <linux/amlogic/power_ctrl.h>
80 #endif
81
#if 1 // FIXME
/*
 * Stub implementations for legacy pts/vfm APIs that are not available
 * in this build.  Each stub logs an error via pr_err() and fails
 * (returning -1 where a return value is expected) so any caller that
 * still depends on them shows up in the kernel log.
 */
int pts_get_rec_num(u8 type, u32 val)
{
	pr_err("Error: %s() is not supported.\n", __func__);
	return -1;
}

int vf_notify_receiver(const char *provider_name, int event_type, void *data)
{
	pr_err("Error: %s() is not supported.\n", __func__);
	return -1;
}

int vfm_map_add(char *id, char *name_chain)
{
	pr_err("Error: %s() is not supported.\n", __func__);
	return -1;
}

int vfm_map_remove(char *id)
{
	pr_err("Error: %s() is not supported.\n", __func__);
	return -1;
}

int vf_reg_provider(struct vframe_provider_s *prov)
{
	pr_err("Error: %s() is not supported.\n", __func__);
	return -1;
}

void vf_unreg_provider(struct vframe_provider_s *prov)
{
	pr_err("Error: %s() is not supported.\n", __func__);
}

#endif
119
120 static DEFINE_MUTEX(vdec_mutex);
121
122 #define MC_SIZE (4096 * 4)
123 #define CMA_ALLOC_SIZE SZ_64M
124 #define MEM_NAME "vdec_prealloc"
125 static int inited_vcodec_num;
126 #define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
127 static int poweron_clock_level;
128 static int debug_vdetect = 0;
129 static int keep_vdec_mem;
130 static unsigned int debug_trace_num = 16 * 20;
131 static int step_mode;
132 static unsigned int clk_config;
133 /*
134 &1: sched_priority to MAX_RT_PRIO -1.
135 &2: always reload firmware.
136 &4: vdec canvas debug enable
137 */
138 static unsigned int debug = 2;
139
140 static int hevc_max_reset_count;
141
142 static int no_powerdown;
143 static int parallel_decode = 1;
144 static int fps_detection;
145 static int fps_clear;
146
147
148 static int force_nosecure_even_drm;
149 static int disable_switch_single_to_mult;
150
151 static DEFINE_SPINLOCK(vdec_spin_lock);
152
153 #define HEVC_TEST_LIMIT 100
154 #define GXBB_REV_A_MINOR 0xA
155
156 #define PRINT_FRAME_INFO 1
157 #define DISABLE_FRAME_INFO 2
158
159 static int frameinfo_flag = 0;
160 static int v4lvideo_add_di = 1;
161 static int max_di_instance = 2;
162
163 //static int path_debug = 0;
164
165 static int enable_mvdec_info = 1;
166
167 int decode_underflow = 0;
168
169 int enable_stream_mode_multi_dec;
170
171 #define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)
172
/* Simple name/offset pair (used by register-dump style debug tables). */
struct am_reg {
	char *name;
	int offset;
};

/* Per-IRQ bookkeeping: registered handler pair and the owning vdec. */
struct vdec_isr_context_s {
	int index;
	int irq;
	irq_handler_t dev_isr;
	irq_handler_t dev_threaded_isr;
	void *dev_id;
	struct vdec_s *vdec;
};

/* Frame-rate measurement state for one decoder instance. */
struct decode_fps_s {
	u32 frame_count;
	u64 start_timestamp;	/* usec time of the first counted frame */
	u64 last_timestamp;	/* usec time of the most recent frame */
	u32 fps;		/* computed rate (scaled; see vdec_fps_detec) */
};

/*
 * Global decoder-core state: the list of connected instances, the
 * scheduler thread and its wakeup semaphore, per-core power refcounts,
 * and the input-availability bitmaps consulted by the scheduler.
 */
struct vdec_core_s {
	struct list_head connected_vdec_list;
	spinlock_t lock;		/* scheduler state lock */
	spinlock_t canvas_lock;		/* protects canvas_stat[] */
	spinlock_t fps_lock;		/* protects decode_fps[] */
	spinlock_t input_lock;		/* protects buff_flag bitmaps */
	struct ida ida;			/* instance id allocator */
	atomic_t vdec_nr;		/* number of live instances */
	struct vdec_s *vfm_vdec;
	struct vdec_s *active_vdec;
	struct vdec_s *active_hevc;
	struct vdec_s *hint_fr_vdec;
	struct platform_device *vdec_core_platform_device;
	struct device *cma_dev;
	struct semaphore sem;		/* wakes the scheduler thread */
	struct task_struct *thread;
	struct workqueue_struct *vdec_core_wq;

	unsigned long sched_mask;
	struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
	int power_ref_count[VDEC_MAX];
	struct vdec_s *last_vdec;
	int parallel_dec;
	unsigned long power_ref_mask;
	int vdec_combine_flag;
	struct decode_fps_s decode_fps[MAX_INSTANCE_MUN];
	unsigned long buff_flag;	/* cores with frame input queued */
	unsigned long stream_buff_flag;	/* cores fed by stream input */
};

/* Sharing state of one canvas entry: type tag, refcount, user-id bits. */
struct canvas_status_s {
	int type;
	int canvas_used_flag;	/* reference count */
	int id;			/* bitmask of instance ids using this entry */
};
229
230
231 static struct vdec_core_s *vdec_core;
232
233 static const char * const vdec_status_string[] = {
234 "VDEC_STATUS_UNINITIALIZED",
235 "VDEC_STATUS_DISCONNECTED",
236 "VDEC_STATUS_CONNECTED",
237 "VDEC_STATUS_ACTIVE"
238 };
239 /*
240 bit [28] enable print
241 bit [23:16] etc
242 bit [15:12]
243 none 0 and not 0x1: force single
244 none 0 and 0x1: force multi
245 bit [8]
246 1: force dual
247 bit [3]
248 1: use mavs for single mode
249 bit [2]
250 1: force vfm path for frame mode
251 bit [1]
252 1: force esparser auto mode
253 bit [0]
	1: disable auto manual mode ??
255 */
256
257 static int debugflags;
258
259 static char vfm_path[VDEC_MAP_NAME_SIZE] = {"disable"};
260 static const char vfm_path_node[][VDEC_MAP_NAME_SIZE] =
261 {
262 "video_render.0",
263 "video_render.1",
264 "amvideo",
265 "videopip",
266 "deinterlace",
267 "dimulti.1",
268 "amlvideo",
269 "aml_video.1",
270 "amlvideo2.0",
271 "amlvideo2.1",
272 "ppmgr",
273 "ionvideo",
274 "ionvideo.1",
275 "ionvideo.2",
276 "ionvideo.3",
277 "ionvideo.4",
278 "ionvideo.5",
279 "ionvideo.6",
280 "ionvideo.7",
281 "ionvideo.8",
282 "videosync.0",
283 "v4lvideo.0",
284 "v4lvideo.1",
285 "v4lvideo.2",
286 "v4lvideo.3",
287 "v4lvideo.4",
288 "v4lvideo.5",
289 "v4lvideo.6",
290 "v4lvideo.7",
291 "v4lvideo.8",
292 "disable",
293 "reserved",
294 };
295
296 static struct canvas_status_s canvas_stat[AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1];
297
298
/* Return the module-level debugflags bitmask (bit layout documented above). */
int vdec_get_debug_flags(void)
{
	return debugflags;
}
EXPORT_SYMBOL(vdec_get_debug_flags);
304
VDEC_PRINT_FUN_LINENO(const char * fun,int line)305 void VDEC_PRINT_FUN_LINENO(const char *fun, int line)
306 {
307 if (debugflags & 0x10000000)
308 pr_info("%s, %d\n", fun, line);
309 }
310 EXPORT_SYMBOL(VDEC_PRINT_FUN_LINENO);
311
is_mult_inc(unsigned int type)312 unsigned char is_mult_inc(unsigned int type)
313 {
314 unsigned char ret = 0;
315 if (vdec_get_debug_flags() & 0xf000)
316 ret = (vdec_get_debug_flags() & 0x1000)
317 ? 1 : 0;
318 else if (type & PORT_TYPE_DECODER_SCHED)
319 ret = 1;
320 return ret;
321 }
322 EXPORT_SYMBOL(is_mult_inc);
323
/* Which cores own an input (bitstream feeder) context, by vdec_type_e. */
static const bool cores_with_input[VDEC_MAX] = {
	true, /* VDEC_1 */
	false, /* VDEC_HCODEC */
	false, /* VDEC_2 */
	true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
	false, /* VDEC_HEVC_BACK */
};

/* IRQ line used by each core, indexed by vdec_type_e. */
static const int cores_int[VDEC_MAX] = {
	VDEC_IRQ_1,
	VDEC_IRQ_2,
	VDEC_IRQ_0,
	VDEC_IRQ_0,
	VDEC_IRQ_HEVC_BACK
};
339
/* Acquire the canvas-table spinlock; returns the saved irq flags. */
unsigned long vdec_canvas_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->canvas_lock, flags);

	return flags;
}

/* Release the canvas-table spinlock, restoring the saved irq flags. */
void vdec_canvas_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->canvas_lock, flags);
}

/* Acquire the fps-statistics spinlock; returns the saved irq flags. */
unsigned long vdec_fps_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->fps_lock, flags);

	return flags;
}

/* Release the fps-statistics spinlock, restoring the saved irq flags. */
void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->fps_lock, flags);
}

/* Acquire the core scheduler spinlock; returns the saved irq flags. */
unsigned long vdec_core_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->lock, flags);

	return flags;
}

/* Release the core scheduler spinlock, restoring the saved irq flags. */
void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->lock, flags);
}

/* Acquire the input-buffer state spinlock; returns the saved irq flags. */
unsigned long vdec_inputbuff_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->input_lock, flags);

	return flags;
}

/* Release the input-buffer state spinlock, restoring the saved irq flags. */
void vdec_inputbuff_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->input_lock, flags);
}
393
394
vdec_is_input_frame_empty(struct vdec_s * vdec)395 static bool vdec_is_input_frame_empty(struct vdec_s *vdec) {
396 struct vdec_core_s *core = vdec_core;
397 bool ret;
398 unsigned long flags;
399
400 flags = vdec_inputbuff_lock(core);
401 ret = !(vdec->core_mask & core->buff_flag);
402 vdec_inputbuff_unlock(core, flags);
403
404 return ret;
405 }
406
vdec_up(struct vdec_s * vdec)407 static void vdec_up(struct vdec_s *vdec)
408 {
409 struct vdec_core_s *core = vdec_core;
410
411 if (debug & 8)
412 pr_info("vdec_up, id:%d\n", vdec->id);
413 up(&core->sem);
414 }
415
416
vdec_get_us_time_system(void)417 static u64 vdec_get_us_time_system(void)
418 {
419 struct timespec64 tspec;
420 ktime_get_real_ts64(&tspec);
421 return tspec.tv_sec * 1000000 + tspec.tv_nsec / 1000;
422 }
423
vdec_fps_clear(int id)424 static void vdec_fps_clear(int id)
425 {
426 if (id >= MAX_INSTANCE_MUN)
427 return;
428
429 vdec_core->decode_fps[id].frame_count = 0;
430 vdec_core->decode_fps[id].start_timestamp = 0;
431 vdec_core->decode_fps[id].last_timestamp = 0;
432 vdec_core->decode_fps[id].fps = 0;
433 }
434
vdec_fps_clearall(void)435 static void vdec_fps_clearall(void)
436 {
437 int i;
438
439 for (i = 0; i < MAX_INSTANCE_MUN; i++) {
440 vdec_core->decode_fps[i].frame_count = 0;
441 vdec_core->decode_fps[i].start_timestamp = 0;
442 vdec_core->decode_fps[i].last_timestamp = 0;
443 vdec_core->decode_fps[i].fps = 0;
444 }
445 }
446
vdec_fps_detec(int id)447 static void vdec_fps_detec(int id)
448 {
449 unsigned long flags;
450
451 if (fps_detection == 0)
452 return;
453
454 if (id >= MAX_INSTANCE_MUN)
455 return;
456
457 flags = vdec_fps_lock(vdec_core);
458
459 if (fps_clear == 1) {
460 vdec_fps_clearall();
461 fps_clear = 0;
462 }
463
464 vdec_core->decode_fps[id].frame_count++;
465 if (vdec_core->decode_fps[id].frame_count == 1) {
466 vdec_core->decode_fps[id].start_timestamp =
467 vdec_get_us_time_system();
468 vdec_core->decode_fps[id].last_timestamp =
469 vdec_core->decode_fps[id].start_timestamp;
470 } else {
471 vdec_core->decode_fps[id].last_timestamp =
472 vdec_get_us_time_system();
473 vdec_core->decode_fps[id].fps =
474 (u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) *
475 10000000000),
476 (vdec_core->decode_fps[id].last_timestamp -
477 vdec_core->decode_fps[id].start_timestamp));
478 }
479 vdec_fps_unlock(vdec_core, flags);
480 }
481
482
483
get_canvas(unsigned int index,unsigned int base)484 static int get_canvas(unsigned int index, unsigned int base)
485 {
486 int start;
487 int canvas_index = index * base;
488 int ret;
489
490 if ((base > 4) || (base == 0))
491 return -1;
492
493 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
494 <= AMVDEC_CANVAS_MAX1) {
495 start = AMVDEC_CANVAS_START_INDEX + base * index;
496 } else {
497 canvas_index -= (AMVDEC_CANVAS_MAX1 -
498 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
499 if (canvas_index <= AMVDEC_CANVAS_MAX2)
500 start = canvas_index / base;
501 else
502 return -1;
503 }
504
505 if (base == 1) {
506 ret = start;
507 } else if (base == 2) {
508 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
509 } else if (base == 3) {
510 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
511 } else if (base == 4) {
512 ret = (((start + 3) << 24) | (start + 2) << 16) |
513 ((start + 1) << 8) | start;
514 }
515
516 return ret;
517 }
518
get_canvas_ex(int type,int id)519 static int get_canvas_ex(int type, int id)
520 {
521 int i;
522 unsigned long flags;
523
524 flags = vdec_canvas_lock(vdec_core);
525
526 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
527 /*0x10-0x15 has been used by rdma*/
528 if ((i >= 0x10) && (i <= 0x15))
529 continue;
530 if ((canvas_stat[i].type == type) &&
531 (canvas_stat[i].id & (1 << id)) == 0) {
532 canvas_stat[i].canvas_used_flag++;
533 canvas_stat[i].id |= (1 << id);
534 if (debug & 4)
535 pr_debug("get used canvas %d\n", i);
536 vdec_canvas_unlock(vdec_core, flags);
537 if (i < AMVDEC_CANVAS_MAX2 + 1)
538 return i;
539 else
540 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
541 }
542 }
543
544 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
545 /*0x10-0x15 has been used by rdma*/
546 if ((i >= 0x10) && (i <= 0x15))
547 continue;
548 if (canvas_stat[i].type == 0) {
549 canvas_stat[i].type = type;
550 canvas_stat[i].canvas_used_flag = 1;
551 canvas_stat[i].id = (1 << id);
552 if (debug & 4) {
553 pr_debug("get canvas %d\n", i);
554 pr_debug("canvas_used_flag %d\n",
555 canvas_stat[i].canvas_used_flag);
556 pr_debug("canvas_stat[i].id %d\n",
557 canvas_stat[i].id);
558 }
559 vdec_canvas_unlock(vdec_core, flags);
560 if (i < AMVDEC_CANVAS_MAX2 + 1)
561 return i;
562 else
563 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
564 }
565 }
566 vdec_canvas_unlock(vdec_core, flags);
567
568 pr_info("cannot get canvas\n");
569
570 return -1;
571 }
572
free_canvas_ex(int index,int id)573 static void free_canvas_ex(int index, int id)
574 {
575 unsigned long flags;
576 int offset;
577
578 flags = vdec_canvas_lock(vdec_core);
579 if (index >= 0 &&
580 index < AMVDEC_CANVAS_MAX2 + 1)
581 offset = index;
582 else if ((index >= AMVDEC_CANVAS_START_INDEX) &&
583 (index <= AMVDEC_CANVAS_MAX1))
584 offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX;
585 else {
586 vdec_canvas_unlock(vdec_core, flags);
587 return;
588 }
589
590 if ((canvas_stat[offset].canvas_used_flag > 0) &&
591 (canvas_stat[offset].id & (1 << id))) {
592 canvas_stat[offset].canvas_used_flag--;
593 canvas_stat[offset].id &= ~(1 << id);
594 if (canvas_stat[offset].canvas_used_flag == 0) {
595 canvas_stat[offset].type = 0;
596 canvas_stat[offset].id = 0;
597 }
598 if (debug & 4) {
599 pr_debug("free index %d used_flag %d, type = %d, id = %d\n",
600 offset,
601 canvas_stat[offset].canvas_used_flag,
602 canvas_stat[offset].type,
603 canvas_stat[offset].id);
604 }
605 }
606 vdec_canvas_unlock(vdec_core, flags);
607
608 return;
609
610 }
611
/*
 * Pulse the DMC pipeline reset bits for all decoder/encoder bus
 * masters (required on G12B before re-enabling DMC requests; see
 * vdec_enable_DMC()).
 */
static void vdec_dmc_pipeline_reset(void)
{
	/*
	 * bit15: vdec_piple
	 * bit14: hevc_dmc_piple
	 * bit13: hevcf_dmc_pipl
	 * bit12: wave420_dmc_pipl
	 * bit11: hcodec_dmc_pipl
	 */

	WRITE_RESET_REG(RESET7_REGISTER,
		(1 << 15) | (1 << 14) | (1 << 13) |
		(1 << 12) | (1 << 11));
}
626
vdec_stop_armrisc(int hw)627 static void vdec_stop_armrisc(int hw)
628 {
629 ulong timeout = jiffies + HZ;
630
631 if (hw == VDEC_INPUT_TARGET_VLD) {
632 WRITE_VREG(MPSR, 0);
633 WRITE_VREG(CPSR, 0);
634
635 while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
636 if (time_after(jiffies, timeout))
637 break;
638 }
639
640 timeout = jiffies + HZ;
641 while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) {
642 if (time_after(jiffies, timeout))
643 break;
644 }
645 } else if (hw == VDEC_INPUT_TARGET_HEVC) {
646 WRITE_VREG(HEVC_MPSR, 0);
647 WRITE_VREG(HEVC_CPSR, 0);
648
649 while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
650 if (time_after(jiffies, timeout))
651 break;
652 }
653
654 timeout = jiffies + HZ/10;
655 while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) {
656 if (time_after(jiffies, timeout))
657 break;
658 }
659 }
660 }
661
/*
 * Gate off the decoder's DMC (DDR memory controller) request port for
 * the core feeding @vdec, then wait until the DMC reports the channel
 * idle.  Ordering (stop RISC core -> clear request bit -> poll status)
 * follows the vlsi team's timing guidance.
 */
static void vdec_disable_DMC(struct vdec_s *vdec)
{
	/*close first, then wait for pending requests to end, timing suggestion from vlsi*/
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	/* per-core request-enable bit; position moved on G12A and later */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/* need to stop armrisc. */
	if (!IS_ERR_OR_NULL(vdec->dev))
		vdec_stop_armrisc(input->target);

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/*
	 * NOTE(review): these status polls have no timeout -- if the DMC
	 * never reports idle this spins forever.  Presumably safe on real
	 * hardware, but worth confirming.
	 */
	if (is_cpu_tm2_revb()) {
		while (!(codec_dmcbus_read(TM2_REVB_DMC_CHAN_STS)
			& mask))
			;
	} else {
		while (!(codec_dmcbus_read(DMC_CHAN_STS)
			& mask))
			;
	}

	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
700
/*
 * Re-enable the decoder's DMC request port for the core feeding @vdec
 * (inverse of vdec_disable_DMC()); on G12B the DMC pipeline must be
 * reset before requests are re-enabled.
 */
static void vdec_enable_DMC(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	/* same per-core request-enable bits as vdec_disable_DMC() */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/*must to be reset the dmc pipeline if it's g12b.*/
	/* NOTE(review): this compares get_cpu_type() against a
	 * *major-id* constant while the code above uses
	 * get_cpu_major_id() -- confirm both are intended. */
	if (get_cpu_type() == AM_MESON_CPU_MAJOR_ID_G12B)
		vdec_dmc_pipeline_reset();

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
727
728
729
vdec_get_hw_type(int value)730 static int vdec_get_hw_type(int value)
731 {
732 int type;
733 switch (value) {
734 case VFORMAT_HEVC:
735 case VFORMAT_VP9:
736 case VFORMAT_AVS2:
737 case VFORMAT_AV1:
738 type = CORE_MASK_HEVC;
739 break;
740
741 case VFORMAT_MPEG12:
742 case VFORMAT_MPEG4:
743 case VFORMAT_H264:
744 case VFORMAT_MJPEG:
745 case VFORMAT_REAL:
746 case VFORMAT_JPEG:
747 case VFORMAT_VC1:
748 case VFORMAT_AVS:
749 case VFORMAT_YUV:
750 case VFORMAT_H264MVC:
751 case VFORMAT_H264_4K2K:
752 case VFORMAT_H264_ENC:
753 case VFORMAT_JPEG_ENC:
754 type = CORE_MASK_VDEC_1;
755 break;
756
757 default:
758 type = -1;
759 }
760
761 return type;
762 }
763
764
vdec_save_active_hw(struct vdec_s * vdec)765 static void vdec_save_active_hw(struct vdec_s *vdec)
766 {
767 int type;
768
769 type = vdec_get_hw_type(vdec->port->vformat);
770
771 if (type == CORE_MASK_HEVC) {
772 vdec_core->active_hevc = vdec;
773 } else if (type == CORE_MASK_VDEC_1) {
774 vdec_core->active_vdec = vdec;
775 } else {
776 pr_info("save_active_fw wrong\n");
777 }
778 }
779
/*
 * Rebuild the core-wide input-availability bitmaps under the input
 * lock: buff_flag collects cores whose frame-based input has frames
 * queued (or has seen EOS); stream_buff_flag collects cores fed by
 * stream-based input.  Consumed by the scheduler thread.
 */
static void vdec_update_buff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	unsigned long flags;
	struct vdec_s *vdec;

	flags = vdec_inputbuff_lock(core);
	core->buff_flag = 0;
	core->stream_buff_flag = 0;
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input_frame_based(input)) {
			if (input->have_frame_num || input->eos)
				core->buff_flag |= vdec->core_mask;
		} else if (input_stream_based(input)) {
			core->stream_buff_flag |= vdec->core_mask;
		}
	}
	vdec_inputbuff_unlock(core, flags);
}
800
#if 0
/*
 * Compiled out: legacy helper that re-armed a stream-based decoder
 * once enough data accumulated in its stream buffer.  Kept for
 * reference only; not built.
 */
void vdec_update_streambuff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	struct vdec_s *vdec;

	/* check streaming prepare level threshold if not EOS */
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input && input_stream_based(input) && !input->eos &&
			(vdec->need_more_data & VDEC_NEED_MORE_DATA)) {
			u32 rp, wp, level;

			rp = STBUF_READ(&vdec->vbuf, get_rp);
			wp = STBUF_READ(&vdec->vbuf, get_wp);
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;
			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				break;
			} else if (level > input->prepare_level) {
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
				if (debug & 8)
					pr_info("vdec_flush_streambuff_status up\n");
				vdec_up(vdec);
			}
			break;
		}
	}
}
EXPORT_SYMBOL(vdec_update_streambuff_status);
#endif
836
vdec_status(struct vdec_s * vdec,struct vdec_info * vstatus)837 int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
838 {
839 if (vdec && vdec->dec_status &&
840 ((vdec->status == VDEC_STATUS_CONNECTED ||
841 vdec->status == VDEC_STATUS_ACTIVE)))
842 return vdec->dec_status(vdec, vstatus);
843
844 return 0;
845 }
846 EXPORT_SYMBOL(vdec_status);
847
vdec_set_trickmode(struct vdec_s * vdec,unsigned long trickmode)848 int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
849 {
850 int r;
851 if (vdec->set_trickmode) {
852 r = vdec->set_trickmode(vdec, trickmode);
853
854 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
855 r = vdec->slave->set_trickmode(vdec->slave,
856 trickmode);
857 return r;
858 }
859 return -1;
860 }
861 EXPORT_SYMBOL(vdec_set_trickmode);
862
vdec_set_isreset(struct vdec_s * vdec,int isreset)863 int vdec_set_isreset(struct vdec_s *vdec, int isreset)
864 {
865 vdec->is_reset = isreset;
866 pr_info("is_reset=%d\n", isreset);
867 if (vdec->set_isreset)
868 return vdec->set_isreset(vdec, isreset);
869 return 0;
870 }
871 EXPORT_SYMBOL(vdec_set_isreset);
872
/* Record whether Dolby Vision metadata travels with the enhancement layer. */
int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
{
	vdec->dolby_meta_with_el = isdvmetawithel;
	pr_info("isdvmetawithel=%d\n", isdvmetawithel);
	return 0;
}
EXPORT_SYMBOL(vdec_set_dv_metawithel);
880
/* Set the module-wide flag that suppresses decoder power-down.
 * NOTE(review): callers are not visible in this file -- confirm usage. */
void vdec_set_no_powerdown(int flag)
{
	no_powerdown = flag;
	pr_info("no_powerdown=%d\n", no_powerdown);
	return;
}
EXPORT_SYMBOL(vdec_set_no_powerdown);
888
/*
 * Update per-stream decode statistics after one picture.
 * @vs:     statistics block to update
 * @err:    non-zero if the picture had decode errors
 * @offset: current stream read offset in bytes (0 is ignored)
 *
 * Tracks error/frame counts, per-frame data consumption, and a rough
 * bitrate sampled over a ~2 s window of accumulated frame durations.
 */
void vdec_count_info(struct vdec_info *vs, unsigned int err,
	unsigned int offset)
{
	if (err)
		vs->error_frame_count++;
	if (offset) {
		if (0 == vs->frame_count) {
			vs->offset = 0;
			vs->samp_cnt = 0;
		}
		/* bytes consumed since the previous call (absolute delta) */
		vs->frame_data = offset > vs->total_data ?
			offset - vs->total_data : vs->total_data - offset;
		vs->total_data = offset;
		/* samp_cnt accumulates frame_dur; 96000*2 presumably maps
		 * to 2 s in 96 kHz duration units -- TODO confirm the
		 * unit of frame_dur */
		if (vs->samp_cnt < 96000 * 2) { /* 2s */
			if (0 == vs->samp_cnt)
				vs->offset = offset;
			vs->samp_cnt += vs->frame_dur;
		} else {
			/* bytes over the ~2 s window, halved -> bytes/s */
			vs->bit_rate = (offset - vs->offset) / 2;
			/*pr_info("bitrate : %u\n",vs->bit_rate);*/
			vs->samp_cnt = 0;
		}
		vs->frame_count++;
	}
	/*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
		vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
	return;
}
EXPORT_SYMBOL(vdec_count_info);
/* 4K decode is supported on all chips except the GXL 805X package. */
int vdec_is_support_4k(void)
{
	if (is_meson_gxl_package_805X())
		return 0;

	return 1;
}
EXPORT_SYMBOL(vdec_is_support_4k);
923
924 /*
925 * clk_config:
926 *0:default
927 *1:no gp0_pll;
928 *2:always used gp0_pll;
929 *>=10:fixed n M clk;
930 *== 100 , 100M clks;
931 */
/* Read the current vdec clock configuration (values listed above). */
unsigned int get_vdec_clk_config_settings(void)
{
	return clk_config;
}
/* Override the vdec clock configuration selection. */
void update_vdec_clk_config_settings(unsigned int config)
{
	clk_config = config;
}
EXPORT_SYMBOL(update_vdec_clk_config_settings);
941
hevc_workaround_needed(void)942 static bool hevc_workaround_needed(void)
943 {
944 return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) &&
945 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
946 == GXBB_REV_A_MINOR);
947 }
948
/* Device used for CMA (contiguous memory) allocations by decoders. */
struct device *get_codec_cma_device(void)
{
	return vdec_core->cma_dev;
}
953
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/*
 * Driver name table indexed by vformat.  Entries come in pairs of
 * {single-instance name, multi-instance name}; get_dev_name() picks
 * the right half of each pair.
 */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12",     "ammvdec_mpeg12",
	"amvdec_mpeg4",      "ammvdec_mpeg4",
	"amvdec_h264",       "ammvdec_h264",
	"amvdec_mjpeg",      "ammvdec_mjpeg",
	"amvdec_real",       "ammvdec_real",
	"amjpegdec",         "ammjpegdec",
	"amvdec_vc1",        "ammvdec_vc1",
	"amvdec_avs",        "ammvdec_avs",
	"amvdec_yuv",        "ammvdec_yuv",
	"amvdec_h264mvc",    "ammvdec_h264mvc",
	"amvdec_h264_4k2k",  "ammvdec_h264_4k2k",
	"amvdec_h265",       "ammvdec_h265",
	"amvenc_avc",        "amvenc_avc",
	"jpegenc",           "jpegenc",
	"amvdec_vp9",        "ammvdec_vp9",
	"amvdec_avs2",       "ammvdec_avs2",
	"amvdec_av1",        "ammvdec_av1",
};


#else

/* Driver name table indexed by vformat (single-instance builds only). */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12",
	"amvdec_mpeg4",
	"amvdec_h264",
	"amvdec_mjpeg",
	"amvdec_real",
	"amjpegdec",
	"amvdec_vc1",
	"amvdec_avs",
	"amvdec_yuv",
	"amvdec_h264mvc",
	"amvdec_h264_4k2k",
	"amvdec_h265",
	"amvenc_avc",
	"jpegenc",
	"amvdec_vp9",
	"amvdec_avs2",
	"amvdec_av1"
};

#endif
999
1000 /*
1001 * Only support time sliced decoding for frame based input,
1002 * so legacy decoder can exist with time sliced decoder.
1003 */
/*
 * Resolve the platform-driver name for @format.  With multi-dec
 * support compiled in, the table holds {single, multi} pairs: the
 * multi name is used unless the caller asked for the legacy decoder
 * (and debugflags bit 3 isn't forcing multi).
 */
static const char *get_dev_name(bool use_legacy_vdec, int format)
{
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	int idx = format * 2;

	if (!use_legacy_vdec || (debugflags & 0x8))
		idx++;	/* pick the multi-instance half of the pair */

	return vdec_device_name[idx];
#else
	return vdec_device_name[format];
#endif
}
1015
1016 #ifdef VDEC_DEBUG_SUPPORT
get_current_clk(void)1017 static u64 get_current_clk(void)
1018 {
1019 /*struct timespec xtime = current_kernel_time();
1020 u64 usec = xtime.tv_sec * 1000000;
1021 usec += xtime.tv_nsec / 1000;
1022 */
1023 u64 usec = sched_clock();
1024 return usec;
1025 }
1026
/* Bump the per-core profiling counter for every core selected in @mask. */
static void inc_profi_count(unsigned long mask, u32 *count)
{
	enum vdec_type_e core;

	for (core = VDEC_1; core < VDEC_MAX; core++)
		if (mask & (1 << core))
			count[core]++;
}
1036
/*
 * Profiling: note the run-start timestamp @clk for each core in @mask,
 * initializing the per-core profile epoch on first use and refreshing
 * the running total-elapsed counter.
 */
static void update_profi_clk_run(struct vdec_s *vdec,
	unsigned long mask, u64 clk)
{
	enum vdec_type_e core;

	for (core = VDEC_1; core < VDEC_MAX; core++) {
		if (!(mask & (1 << core)))
			continue;

		vdec->start_run_clk[core] = clk;
		if (vdec->profile_start_clk[core] == 0)
			vdec->profile_start_clk[core] = clk;
		vdec->total_clk[core] = clk - vdec->profile_start_clk[core];
	}
}
1055
/*
 * Profiling: on run stop at time @clk, add the elapsed run time to the
 * per-core accumulated run_clk for each core in @mask.  Logs when the
 * matching start timestamp was never recorded.
 */
static void update_profi_clk_stop(struct vdec_s *vdec,
	unsigned long mask, u64 clk)
{
	enum vdec_type_e core;

	for (core = VDEC_1; core < VDEC_MAX; core++) {
		if (!(mask & (1 << core)))
			continue;

		if (vdec->start_run_clk[core] == 0)
			pr_info("error, start_run_clk[%d] not set\n", core);

		vdec->run_clk[core] += clk - vdec->start_run_clk[core];
	}
}
1076
1077 #endif
1078
vdec_set_decinfo(struct vdec_s * vdec,struct dec_sysinfo * p)1079 int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
1080 {
1081 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
1082 sizeof(struct dec_sysinfo)))
1083 return -EFAULT;
1084
1085 /* force switch to mult instance if supports this profile. */
1086 if ((vdec->type == VDEC_TYPE_SINGLE) &&
1087 !disable_switch_single_to_mult) {
1088 const char *str = NULL;
1089 char fmt[16] = {0};
1090
1091 str = strchr(get_dev_name(false, vdec->format), '_');
1092 if (!str)
1093 return -1;
1094
1095 sprintf(fmt, "m%s", ++str);
1096 if (is_support_profile(fmt) &&
1097 vdec->sys_info->format != VIDEO_DEC_FORMAT_H263 &&
1098 vdec->format != VFORMAT_AV1)
1099 vdec->type = VDEC_TYPE_STREAM_PARSER;
1100 }
1101
1102 return 0;
1103 }
1104 EXPORT_SYMBOL(vdec_set_decinfo);
1105
/* construct vdec structure */
vdec_create(struct stream_port_s * port,struct vdec_s * master)1107 struct vdec_s *vdec_create(struct stream_port_s *port,
1108 struct vdec_s *master)
1109 {
1110 struct vdec_s *vdec;
1111 int type = VDEC_TYPE_SINGLE;
1112 int id;
1113
1114 if (is_mult_inc(port->type))
1115 type = (port->type & PORT_TYPE_FRAME) ?
1116 VDEC_TYPE_FRAME_BLOCK :
1117 VDEC_TYPE_STREAM_PARSER;
1118
1119 id = ida_simple_get(&vdec_core->ida,
1120 0, MAX_INSTANCE_MUN, GFP_KERNEL);
1121 if (id < 0) {
1122 pr_info("vdec_create request id failed!ret =%d\n", id);
1123 return NULL;
1124 }
1125 vdec = vzalloc(sizeof(struct vdec_s));
1126
1127 /* TBD */
1128 if (vdec) {
1129 vdec->magic = 0x43454456;
1130 vdec->id = -1;
1131 vdec->type = type;
1132 vdec->port = port;
1133 vdec->sys_info = &vdec->sys_info_store;
1134
1135 INIT_LIST_HEAD(&vdec->list);
1136
1137 atomic_inc(&vdec_core->vdec_nr);
1138 #ifdef CONFIG_AMLOGIC_V4L_VIDEO3
1139 v4lvideo_dec_count_increase();
1140 #endif
1141 vdec->id = id;
1142 vdec_input_init(&vdec->input, vdec);
1143 vdec->input.vdec_is_input_frame_empty = vdec_is_input_frame_empty;
1144 vdec->input.vdec_up = vdec_up;
1145 if (master) {
1146 vdec->master = master;
1147 master->slave = vdec;
1148 master->sched = 1;
1149 }
1150 if (enable_mvdec_info) {
1151 vdec->mvfrm = (struct vdec_frames_s *)
1152 vzalloc(sizeof(struct vdec_frames_s));
1153 if (!vdec->mvfrm)
1154 pr_err("vzalloc: vdec_frames_s failed\n");
1155 }
1156 }
1157
1158 pr_debug("vdec_create instance %p, total %d\n", vdec,
1159 atomic_read(&vdec_core->vdec_nr));
1160
1161 //trace_vdec_create(vdec); /*DEBUG_TMP*/
1162
1163 return vdec;
1164 }
1165 EXPORT_SYMBOL(vdec_create);
1166
vdec_set_format(struct vdec_s * vdec,int format)1167 int vdec_set_format(struct vdec_s *vdec, int format)
1168 {
1169 vdec->format = format;
1170 vdec->port_flag |= PORT_FLAG_VFORMAT;
1171
1172 if (vdec->slave) {
1173 vdec->slave->format = format;
1174 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
1175 }
1176 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
1177
1178 return 0;
1179 }
1180 EXPORT_SYMBOL(vdec_set_format);
1181
/*
 * Store a 32-bit pts and its widened 64-bit form (pts * 100 / 9,
 * the inverse of the scaling in vdec_set_pts64), then mark it valid.
 */
int vdec_set_pts(struct vdec_s *vdec, u32 pts)
{
	vdec->pts64 = div64_u64((u64)pts * 100, 9);
	vdec->pts = pts;
	vdec->pts_valid = true;
	//trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts);
1191
/* Record a 64-bit timestamp on the vdec and mark it valid. */
void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
{
	/* value is written before the valid flag; no barrier is used,
	 * so concurrent readers presumably tolerate a brief mismatch
	 */
	vdec->timestamp = timestamp;
	vdec->timestamp_valid = true;
}
EXPORT_SYMBOL(vdec_set_timestamp);
1198
/*
 * Store a 64-bit pts and its narrowed 32-bit form (pts64 * 9 / 100,
 * the inverse of the scaling in vdec_set_pts), then mark it valid.
 */
int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
{
	vdec->pts = (u32)div64_u64(pts64 * 9, 100);
	vdec->pts64 = pts64;
	vdec->pts_valid = true;

	//trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts64);
1209
/* Return the current scheduler status (VDEC_STATUS_*) of this vdec. */
int vdec_get_status(struct vdec_s *vdec)
{
	return vdec->status;
}
EXPORT_SYMBOL(vdec_get_status);
1215
/* Return the number of frames currently held in the input chain. */
int vdec_get_frame_num(struct vdec_s *vdec)
{
	return vdec->input.have_frame_num;
}
EXPORT_SYMBOL(vdec_get_frame_num);
1221
/* Set the current scheduler status (VDEC_STATUS_*) of this vdec. */
void vdec_set_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
	vdec->status = status;
}
EXPORT_SYMBOL(vdec_set_status);
1228
/* Set the pending status the scheduler should transition this vdec to. */
void vdec_set_next_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
	vdec->next_status = status;
}
EXPORT_SYMBOL(vdec_set_next_status);
1235
/* Record the frame-base video output path id for this vdec. */
int vdec_set_video_path(struct vdec_s *vdec, int video_path)
{
	vdec->frame_base_video_path = video_path;
	return 0;
}
EXPORT_SYMBOL(vdec_set_video_path);
1242
/* Record the vframe receiver instance id used by the output path. */
int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
{
	vdec->vf_receiver_inst = receive_id;
	return 0;
}
EXPORT_SYMBOL(vdec_set_receive_id);
1249
/* add frame data to input chain */
/* Thin wrapper: queues 'count' bytes from 'buf' via vdec_input_add_frame
 * and returns its result.
 */
int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
{
	return vdec_input_add_frame(&vdec->input, buf, count);
}
EXPORT_SYMBOL(vdec_write_vframe);
1256
/* DMA variant of vdec_write_vframe: queues a frame described by a
 * physical address, length and DMA-buf handle instead of a user buffer.
 */
int vdec_write_vframe_with_dma(struct vdec_s *vdec,
	ulong addr, size_t count, u32 handle)
{
	return vdec_input_add_frame_with_dma(&vdec->input, addr, count, handle);
}
EXPORT_SYMBOL(vdec_write_vframe_with_dma);
1263
1264 /* add a work queue thread for vdec*/
vdec_schedule_work(struct work_struct * work)1265 void vdec_schedule_work(struct work_struct *work)
1266 {
1267 if (vdec_core->vdec_core_wq)
1268 queue_work(vdec_core->vdec_core_wq, work);
1269 else
1270 schedule_work(work);
1271 }
1272 EXPORT_SYMBOL(vdec_schedule_work);
1273
vdec_get_associate(struct vdec_s * vdec)1274 static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
1275 {
1276 if (vdec->master)
1277 return vdec->master;
1278 else if (vdec->slave)
1279 return vdec->slave;
1280 return NULL;
1281 }
1282
/*
 * Propagate the HW read pointer back into the stream buffer for
 * stream-based inputs.  For dual (master/slave) decoders the stbuf
 * read pointer is only advanced up to the slower of the two readers,
 * so shared stream data is not reclaimed before both have consumed it.
 */
static void vdec_sync_input_read(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec_dual(vdec)) {
		u32 me, other;
		if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
			/* compare wrap counts first, raw pointers second */
			me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
			other =
				vdec_get_associate(vdec)->input.stream_cookie;
			if (me > other)
				return;
			else if (me == other) {
				me = READ_VREG(VLD_MEM_VIFIFO_RP);
				other =
				vdec_get_associate(vdec)->input.swap_rp;
				if (me > other) {
					/* peer is behind: release only up to
					 * the peer's saved read pointer
					 */
					STBUF_WRITE(&vdec->vbuf, set_rp,
						vdec_get_associate(vdec)->input.swap_rp);
					return;
				}
			}

			STBUF_WRITE(&vdec->vbuf, set_rp,
				READ_VREG(VLD_MEM_VIFIFO_RP));
		} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
			me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			/* NOTE(review): 'me' is u32, so the 32-bit wrap
			 * compensation below truncates to a no-op --
			 * confirm whether 'me' was meant to be u64
			 */
			if (((me & 0x80000000) == 0) &&
				(vdec->input.streaming_rp & 0x80000000))
				me += 1ULL << 32;
			other = vdec_get_associate(vdec)->input.streaming_rp;
			if (me > other) {
				STBUF_WRITE(&vdec->vbuf, set_rp,
					vdec_get_associate(vdec)->input.swap_rp);
				return;
			}

			STBUF_WRITE(&vdec->vbuf, set_rp,
				READ_VREG(HEVC_STREAM_RD_PTR));
		}
	} else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		STBUF_WRITE(&vdec->vbuf, set_rp,
			READ_VREG(VLD_MEM_VIFIFO_RP));
	} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		STBUF_WRITE(&vdec->vbuf, set_rp,
			READ_VREG(HEVC_STREAM_RD_PTR));
	}
}
1332
/*
 * Propagate the stream buffer write pointer into the HW input FIFO
 * (VLD or HEVC).  In multi-dec stream mode a slave does not touch the
 * HW register; it mirrors the master's write pointer into its own
 * stream buffer instead.
 */
static void vdec_sync_input_write(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		if (enable_stream_mode_multi_dec) {
			if (!vdec->master) {
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					STBUF_READ(&vdec->vbuf, get_wp));
			} else {
				/* slave: copy master's wp into own stbuf */
				STBUF_WRITE(&vdec->vbuf, set_wp,
					STBUF_READ(&vdec->master->vbuf, get_wp));
			}
		} else {
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				STBUF_READ(&vdec->vbuf, get_wp));
		}
	} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		if (enable_stream_mode_multi_dec) {
			if (!vdec->master) {
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					STBUF_READ(&vdec->vbuf, get_wp));
			} else {
				/* slave: copy master's wp into own stbuf */
				STBUF_WRITE(&vdec->vbuf, set_wp,
					STBUF_READ(&vdec->master->vbuf, get_wp));
			}
		} else {
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				STBUF_READ(&vdec->vbuf, get_wp));
		}
	}
}
1366
1367 /*
1368 *get next frame from input chain
1369 */
1370 /*
1371 *THE VLD_FIFO is 512 bytes and Video buffer level
1372 * empty interrupt is set to 0x80 bytes threshold
1373 */
1374 #define VLD_PADDING_SIZE 1024
1375 #define HEVC_PADDING_SIZE (1024*16)
/*
 * Prepare the HW input FIFO for the next decode run.
 *
 * Frame based: picks the next chunk from the input chain, programs the
 * FIFO around it (plus padding so the level-empty interrupt does not
 * fire early), returns the chunk via *p and its size.  Returns -1 with
 * *p = NULL when no chunk is ready.
 *
 * Stream based: restores the saved VLD/HEVC swap context when one is
 * valid (for dual decoders the context may be borrowed from the
 * counterpart), otherwise programs the FIFO from scratch; *p is set to
 * NULL and the number of available bytes (including data already in
 * the HW fifo) is returned.
 */
int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
{
	struct vdec_input_s *input = &vdec->input;
	struct vframe_chunk_s *chunk = NULL;
	struct vframe_block_list_s *block = NULL;
	int dummy;

	/* full reset to HW input */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

		/* reset VLD fifo for all vdec */
		WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
		WRITE_VREG(DOS_SW_RESET0, 0);

		/* dummy read flushes the reset write before continuing */
		dummy = READ_RESET_REG(RESET0_REGISTER);
		WRITE_VREG(POWER_CTL_VLD, 1 << 4);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
#if 0
		/*move to driver*/
		if (input_frame_based(input))
			WRITE_VREG(HEVC_STREAM_CONTROL, 0);

		/*
		 * 2: assist
		 * 3: parser
		 * 4: parser_state
		 * 8: dblk
		 * 11:mcpu
		 * 12:ccpu
		 * 13:ddr
		 * 14:iqit
		 * 15:ipp
		 * 17:qdct
		 * 18:mpred
		 * 19:sao
		 * 24:hevc_afifo
		 */
		WRITE_VREG(DOS_SW_RESET3,
			(1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
			(1<<17)|(1<<18)|(1<<19));
		WRITE_VREG(DOS_SW_RESET3, 0);
#endif
	}

	/*
	 *setup HW decoder input buffer (VLD context)
	 * based on input->type and input->target
	 */
	if (input_frame_based(input)) {
		chunk = vdec_input_next_chunk(&vdec->input);

		if (chunk == NULL) {
			*p = NULL;
			return -1;
		}

		block = chunk->block;

		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
					block->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					round_down(block->start + chunk->offset,
						VDEC_FIFO_ALIGN));

			/* pulse control to latch the new pointers */
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP,
					round_down(block->start + chunk->offset,
						VDEC_FIFO_ALIGN));
			/* pad the wp past the chunk so the fifo does not
			 * go empty mid-frame; wrap within the block
			 */
			dummy = chunk->offset + chunk->size +
				VLD_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
				(0x11 << 16) | (1<<10) | (7<<3));

		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
					block->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
					chunk->offset);
			/* same padding trick as VLD, larger for HEVC */
			dummy = chunk->offset + chunk->size +
				HEVC_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			/* set endian */
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		}

		*p = chunk;
		return chunk->size;

	} else {
		/* stream based */
		u32 rp = 0, wp = 0, fifo_len = 0;
		int size;
		bool swap_valid = input->swap_valid;
		unsigned long swap_page_phys = input->swap_page_phys;

		if (vdec_dual(vdec) &&
			((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
			/* keep using previous input context */
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			if (master->input.last_swap_slave) {
				swap_valid = master->slave->input.swap_valid;
				swap_page_phys =
					master->slave->input.swap_page_phys;
			} else {
				swap_valid = master->input.swap_valid;
				swap_page_phys = master->input.swap_page_phys;
			}
		}

		if (swap_valid) {
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				if (vdec->format == VFORMAT_H264)
					SET_VREG_MASK(POWER_CTL_VLD,
						(1 << 9));

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* restore read side */
				WRITE_VREG(VLD_MEM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

				/* busy-wait for the swap engine to finish */
				while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
					;
				WRITE_VREG(VLD_MEM_SWAP_CTL, 0);

				/* restore wrap count */
				WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
					input->stream_cookie);

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);
				fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL,
						7 << 3);

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);
			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);

				/* restore read side */
				WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

				/* busy-wait for the swap engine to finish */
				while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
					& (1<<7))
					;
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

				/* restore stream offset */
				WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
					input->stream_cookie);

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;


				/* enable */

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(HEVC_STREAM_WR_PTR);

				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(HEVC_STREAM_CONTROL,
						7 << 4);
				/*pr_info("vdec: restore context\r\n");*/
			}

		} else {
			/* no saved context: program the FIFO from scratch */
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
					input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
					input->start + input->size - 8);
				WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					input->start);

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* set to manual mode */
				WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
				WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					STBUF_READ(&vdec->vbuf, get_wp));
				rp = READ_VREG(VLD_MEM_VIFIFO_RP);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));
				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL,
						7 << 3);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);

			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				WRITE_VREG(HEVC_STREAM_START_ADDR,
					input->start);
				WRITE_VREG(HEVC_STREAM_END_ADDR,
					input->start + input->size);
				WRITE_VREG(HEVC_STREAM_RD_PTR,
					input->start);
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					STBUF_READ(&vdec->vbuf, get_wp));
				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				wp = READ_VREG(HEVC_STREAM_WR_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;
				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(HEVC_STREAM_CONTROL,
						7 << 4);
				/* enable */
			}
		}
		*p = NULL;
		/* available bytes = ring distance wp..rp plus HW fifo level */
		if (wp >= rp)
			size = wp - rp + fifo_len;
		else
			size = wp + input->size - rp + fifo_len;
		if (size < 0) {
			pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
				__func__, input->size, wp, rp, fifo_len, size);
			size = 0;
		}
		return size;
	}
}
EXPORT_SYMBOL(vdec_prepare_input);
1642
/*
 * Kick the HW input FIFO of an ACTIVE vdec: enables fill/empty on the
 * VLD path, or stream fetch plus endian configuration on the HEVC
 * path.  No-op unless the vdec is in VDEC_STATUS_ACTIVE.
 */
void vdec_enable_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (vdec->status != VDEC_STATUS_ACTIVE)
		return;

	if (input->target == VDEC_INPUT_TARGET_VLD)
		SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
	else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
		if (vdec_stream_based(vdec)) {
			if (vdec->vbuf.no_parser)
				/*set endian for non-parser mode. */
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
			else
				CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		} else
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);

		SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
	}
}
EXPORT_SYMBOL(vdec_enable_input);
1667
/*
 * Attach the stream buffer [start, start+size) to this vdec's input,
 * and to the slave's input as well for dual decoders.  Returns the
 * first non-zero error, or 0 on success.
 */
int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
{
	int ret = vdec_input_set_buffer(&vdec->input, start, size);

	if (ret == 0 && vdec->slave)
		ret = vdec_input_set_buffer(&vdec->slave->input, start, size);

	return ret;
}
EXPORT_SYMBOL(vdec_set_input_buffer);
1681
1682 /*
1683 * vdec_eos returns the possibility that there are
1684 * more input can be used by decoder through vdec_prepare_input
1685 * Note: this function should be called prior to vdec_vframe_dirty
1686 * by decoder driver to determine if EOS happens for stream based
1687 * decoding when there is no sufficient data for a frame
1688 */
bool vdec_has_more_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	/* until EOS is signalled, always assume more data may arrive */
	if (!input->eos)
		return true;

	if (input_frame_based(input))
		return vdec_input_next_input_chunk(input) != NULL;
	else {
		/* stream based: data remains while the HW write pointer
		 * still trails the stream buffer write pointer
		 */
		if (input->target == VDEC_INPUT_TARGET_VLD)
			return READ_VREG(VLD_MEM_VIFIFO_WP) !=
				STBUF_READ(&vdec->vbuf, get_wp);
		else {
			/* HEVC pointers compared 4-byte aligned */
			return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
				(STBUF_READ(&vdec->vbuf, get_wp) & ~0x3);
		}
	}
}
EXPORT_SYMBOL(vdec_has_more_input);
1709
/* Set the input prepare level threshold used by the input machinery. */
void vdec_set_prepare_level(struct vdec_s *vdec, int level)
{
	vdec->input.prepare_level = level;
}
EXPORT_SYMBOL(vdec_set_prepare_level);
1715
/* Replace (not OR-in) the vdec flag word with 'flag' (VDEC_FLAG_*). */
void vdec_set_flag(struct vdec_s *vdec, u32 flag)
{
	vdec->flag = flag;
}
EXPORT_SYMBOL(vdec_set_flag);
1721
/*
 * Mark end-of-stream on this vdec (and its slave, if any), then wake
 * the core scheduler thread so the state change is acted upon.
 */
void vdec_set_eos(struct vdec_s *vdec, bool eos)
{
	struct vdec_core_s *core = vdec_core;

	vdec->input.eos = eos;

	if (vdec->slave)
		vdec->slave->input.eos = eos;
	up(&core->sem);
}
EXPORT_SYMBOL(vdec_set_eos);
1733
#ifdef VDEC_DEBUG_SUPPORT
/* Debug aid: enable all step-mode bits so decoding single-steps. */
void vdec_set_step_mode(void)
{
	step_mode = 0x1ff;
}
EXPORT_SYMBOL(vdec_set_step_mode);
#endif
1741
vdec_set_next_sched(struct vdec_s * vdec,struct vdec_s * next_vdec)1742 void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1743 {
1744 if (vdec && next_vdec) {
1745 vdec->sched = 0;
1746 next_vdec->sched = 1;
1747 }
1748 }
1749 EXPORT_SYMBOL(vdec_set_next_sched);
1750
1751 /*
1752 * Swap Context: S0 S1 S2 S3 S4
1753 * Sample sequence: M S M M S
1754 * Master Context: S0 S0 S2 S3 S3
1755 * Slave context: NA S1 S1 S2 S4
1756 * ^
1757 * ^
1758 * ^
1759 * the tricky part
1760 * If there are back to back decoding of master or slave
1761 * then the context of the counter part should be updated
1762 * with current decoder. In this example, S1 should be
1763 * updated to S2.
1764 * This is done by swap the swap_page and related info
1765 * between two layers.
1766 */
/*
 * Borrow the counterpart's swap-context storage for back-to-back
 * decodes of the same layer (see the S0..S4 diagram above).  Swaps the
 * swap pages between the two inputs and copies this side's stream
 * position into the counterpart.  No-op for non-dual decoders.
 */
static void vdec_borrow_input_context(struct vdec_s *vdec)
{
	struct page *swap_page;
	unsigned long swap_page_phys;
	struct vdec_input_s *me;
	struct vdec_input_s *other;

	if (!vdec_dual(vdec))
		return;

	me = &vdec->input;
	other = &vdec_get_associate(vdec)->input;

	/* swap the swap_context, borrow counter part's
	 * swap context storage and update all related info.
	 * After vdec_vframe_dirty, vdec_save_input_context
	 * will be called to update current vdec's
	 * swap context
	 */
	swap_page = other->swap_page;
	other->swap_page = me->swap_page;
	me->swap_page = swap_page;

	swap_page_phys = other->swap_page_phys;
	other->swap_page_phys = me->swap_page_phys;
	me->swap_page_phys = swap_page_phys;

	/* counterpart inherits this side's current stream position */
	other->swap_rp = me->swap_rp;
	other->streaming_rp = me->streaming_rp;
	other->stream_cookie = me->stream_cookie;
	other->swap_valid = me->swap_valid;
}
1799
/*
 * Mark a chunk consumed and, for stream-based inputs, flag that the
 * HW swap context must be saved.  Dual decoders track consecutive
 * dirties per side; a second dirty in a row triggers borrowing the
 * counterpart's swap context (see vdec_borrow_input_context).
 */
void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
{
	if (chunk)
		chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;

	if (vdec_stream_based(vdec)) {
		vdec->input.swap_needed = true;

		if (vdec_dual(vdec)) {
			vdec_get_associate(vdec)->input.dirty_count = 0;
			vdec->input.dirty_count++;
			if (vdec->input.dirty_count > 1) {
				vdec->input.dirty_count = 1;
				vdec_borrow_input_context(vdec);
			}
		}

		/* for stream based mode, we update read and write pointer
		 * also in case decoder wants to keep working on decoding
		 * for more frames while input front end has more data
		 */
		vdec_sync_input_read(vdec);
		vdec_sync_input_write(vdec);

		vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
		vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
	}
}
EXPORT_SYMBOL(vdec_vframe_dirty);
1829
vdec_need_more_data(struct vdec_s * vdec)1830 bool vdec_need_more_data(struct vdec_s *vdec)
1831 {
1832 if (vdec_stream_based(vdec))
1833 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1834
1835 return false;
1836 }
1837 EXPORT_SYMBOL(vdec_need_more_data);
1838
1839
/*
 * Gate off the hevc (and, on G12A and later, hevcb) DMC request ports
 * and busy-wait until the DMC channel status reports them idle, so DDR
 * traffic from the core is quiesced before a context save.
 */
void hevc_wait_ddr(void)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /* hevc */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= (1 << 8); /* hevcb */

	/* clear the request-enable bits under the vdec spinlock */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* TM2 rev.B exposes the channel status at a different register */
	if (is_cpu_tm2_revb()) {
		while (!(codec_dmcbus_read(TM2_REVB_DMC_CHAN_STS)
			& mask))
			;
	} else {
		while (!(codec_dmcbus_read(DMC_CHAN_STS)
			& mask))
			;
	}
}
1864
/*
 * Save the HW input (swap) context after a decode run for stream-based
 * inputs: triggers the VLD/HEVC swap engine to dump state into the
 * swap page, records wrap count / read pointer / total read count, and
 * marks the saved context valid.  For dual decoders, remembers which
 * side saved last so the next restore picks the right context.
 */
void vdec_save_input_context(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
#endif

	if (input->target == VDEC_INPUT_TARGET_VLD)
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);

	if (input_stream_based(input) && (input->swap_needed)) {
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			/* kick the swap engine and wait for completion */
			WRITE_VREG(VLD_MEM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
			while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
				;
			WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
			vdec->input.stream_cookie =
				READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
			vdec->input.swap_rp =
				READ_VREG(VLD_MEM_VIFIFO_RP);
			/* bytes consumed = wraps * size + rp - unread fifo */
			vdec->input.total_rd_count =
				(u64)vdec->input.stream_cookie *
				vdec->input.size + vdec->input.swap_rp -
				READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);

			while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
				;
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

			vdec->input.stream_cookie =
				READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			vdec->input.swap_rp =
				READ_VREG(HEVC_STREAM_RD_PTR);
			/* fold the 32-bit HW byte count into the 64-bit
			 * streaming_rp, carrying on wrap-around
			 */
			if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
				(vdec->input.streaming_rp & 0x80000000))
				vdec->input.streaming_rp += 1ULL << 32;
			vdec->input.streaming_rp &= 0xffffffffULL << 32;
			vdec->input.streaming_rp |= vdec->input.stream_cookie;
			vdec->input.total_rd_count = vdec->input.streaming_rp;
			hevc_wait_ddr();
		}

		input->swap_valid = true;
		input->swap_needed = false;
		/*pr_info("vdec: save context\r\n");*/

		vdec_sync_input_read(vdec);

		if (vdec_dual(vdec)) {
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			master->input.last_swap_slave = (master->slave == vdec);
			/* pr_info("master->input.last_swap_slave = %d\n",
				master->input.last_swap_slave); */
		}
	}
}
EXPORT_SYMBOL(vdec_save_input_context);
1930
vdec_clean_input(struct vdec_s * vdec)1931 void vdec_clean_input(struct vdec_s *vdec)
1932 {
1933 struct vdec_input_s *input = &vdec->input;
1934
1935 while (!list_empty(&input->vframe_chunk_list)) {
1936 struct vframe_chunk_s *chunk =
1937 vdec_input_next_chunk(input);
1938 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
1939 vdec_input_release_chunk(input, chunk);
1940 else
1941 break;
1942 }
1943 vdec_save_input_context(vdec);
1944 }
1945 EXPORT_SYMBOL(vdec_clean_input);
1946
1947
/*
 * Restore the HW read side for a stream-based input: either reload the
 * saved swap context through the swap engine, or (when no context has
 * been saved yet) program the FIFO pointers from scratch.  No-op for
 * frame-based inputs; always returns 0.
 */
static int vdec_input_read_restore(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (!vdec_stream_based(vdec))
		return 0;

	if (!input->swap_valid) {
		/* first run: initialize the FIFO window directly */
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
				input->start + input->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR,
				input->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR,
				input->start + input->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR,
				input->start);
		}
		return 0;
	}
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		/* restore read side */
		WRITE_VREG(VLD_MEM_SWAP_ADDR,
			input->swap_page_phys);

		/*swap active*/
		WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

		/*wait swap busy*/
		while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
			;

		WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		/* restore read side */
		WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
			input->swap_page_phys);
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

		/* wait swap busy */
		while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
			& (1<<7))
			;
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
	}

	return 0;
}
2006
2007
/*
 * Restore the HW read side, sync read/write pointers with the stream
 * buffer, and return the number of input bytes currently available
 * (ring distance from rp to wp plus data already inside the HW fifo).
 */
int vdec_sync_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	u32 rp = 0, wp = 0, fifo_len = 0;
	int size;

	vdec_input_read_restore(vdec);
	vdec_sync_input_read(vdec);
	vdec_sync_input_write(vdec);
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		wp = READ_VREG(VLD_MEM_VIFIFO_WP);

	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		rp = READ_VREG(HEVC_STREAM_RD_PTR);
		wp = READ_VREG(HEVC_STREAM_WR_PTR);
		fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
				>> 16) & 0x7f;
	}
	if (wp >= rp)
		size = wp - rp + fifo_len;
	else
		size = wp + input->size - rp + fifo_len;
	if (size < 0) {
		pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
			__func__, input->size, wp, rp, fifo_len, size);
		size = 0;
	}
	return size;

}
EXPORT_SYMBOL(vdec_sync_input);
2040
vdec_status_str(struct vdec_s * vdec)2041 const char *vdec_status_str(struct vdec_s *vdec)
2042 {
2043 if (vdec->status < 0)
2044 return "INVALID";
2045 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
2046 vdec_status_string[vdec->status] : "INVALID";
2047 }
2048
vdec_type_str(struct vdec_s * vdec)2049 const char *vdec_type_str(struct vdec_s *vdec)
2050 {
2051 switch (vdec->type) {
2052 case VDEC_TYPE_SINGLE:
2053 return "VDEC_TYPE_SINGLE";
2054 case VDEC_TYPE_STREAM_PARSER:
2055 return "VDEC_TYPE_STREAM_PARSER";
2056 case VDEC_TYPE_FRAME_BLOCK:
2057 return "VDEC_TYPE_FRAME_BLOCK";
2058 case VDEC_TYPE_FRAME_CIRCULAR:
2059 return "VDEC_TYPE_FRAME_CIRCULAR";
2060 default:
2061 return "VDEC_TYPE_INVALID";
2062 }
2063 }
2064
/*
 * Return the device name for this vdec's format (the odd entries of
 * vdec_device_name).  NOTE(review): no bounds check -- assumes
 * vdec->format is a valid format index; confirm callers guarantee it.
 */
const char *vdec_device_name_str(struct vdec_s *vdec)
{
	return vdec_device_name[vdec->format * 2 + 1];
}
EXPORT_SYMBOL(vdec_device_name_str);
2070
/*
 * Debug helper: print every vdec on the core's connected list together
 * with its status, under the core lock.  's' tags the output.
 */
void walk_vdec_core_list(char *s)
{
	struct vdec_s *vdec;
	struct vdec_core_s *core = vdec_core;
	unsigned long flags;

	pr_info("%s --->\n", s);

	flags = vdec_core_lock(vdec_core);

	if (list_empty(&core->connected_vdec_list)) {
		pr_info("connected vdec list empty\n");
	} else {
		list_for_each_entry(vdec, &core->connected_vdec_list, list) {
			pr_info("\tvdec (%p), status = %s\n", vdec,
				vdec_status_str(vdec));
		}
	}

	vdec_core_unlock(vdec_core, flags);
}
EXPORT_SYMBOL(walk_vdec_core_list);
2093
2094 /* insert vdec to vdec_core for scheduling,
2095 * for dual running decoders, connect/disconnect always runs in pairs
2096 */
/*
 * Insert the vdec (and its slave, if any) into the core's connected
 * list for scheduling and wake the scheduler.  No-op unless the vdec
 * is currently DISCONNECTED.  Always returns 0.
 */
int vdec_connect(struct vdec_s *vdec)
{
	unsigned long flags;

	//trace_vdec_connect(vdec);/*DEBUG_TMP*/

	if (vdec->status != VDEC_STATUS_DISCONNECTED)
		return 0;

	vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
	vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);

	/* completed by the scheduler when the vdec goes inactive */
	init_completion(&vdec->inactive_done);

	if (vdec->slave) {
		vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
		vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);

		init_completion(&vdec->slave->inactive_done);
	}

	flags = vdec_core_lock(vdec_core);

	list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);

	if (vdec->slave) {
		list_add_tail(&vdec->slave->list,
			&vdec_core->connected_vdec_list);
	}

	vdec_core_unlock(vdec_core, flags);

	/* wake the core scheduler thread */
	up(&vdec_core->sem);

	return 0;
}
EXPORT_SYMBOL(vdec_connect);
2134
2135 /* remove vdec from vdec_core scheduling */
/*
 * Remove the vdec from core scheduling: request DISCONNECTED on it and
 * its counterpart, wake the scheduler, then wait (up to 2s each) for
 * the inactive completions.  The actual status change is performed by
 * the core thread.  Always returns 0, even on timeout (logged).
 */
int vdec_disconnect(struct vdec_s *vdec)
{
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
#endif
	//trace_vdec_disconnect(vdec);/*DEBUG_TMP*/

	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
		(vdec->status != VDEC_STATUS_ACTIVE)) {
		return 0;
	}
	mutex_lock(&vdec_mutex);
	/*
	 *when a vdec is under the management of scheduler
	 * the status change will only be from vdec_core_thread
	 */
	vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);

	if (vdec->slave)
		vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
	else if (vdec->master)
		vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
	mutex_unlock(&vdec_mutex);
	up(&vdec_core->sem);

	if(!wait_for_completion_timeout(&vdec->inactive_done,
		msecs_to_jiffies(2000)))
		goto discon_timeout;

	/* also wait for the dual counterpart to go inactive */
	if (vdec->slave) {
		if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	} else if (vdec->master) {
		if(!wait_for_completion_timeout(&vdec->master->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	}

	return 0;
discon_timeout:
	pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
	return 0;
}
EXPORT_SYMBOL(vdec_disconnect);
2181
2182 /* release vdec structure */
vdec_destroy(struct vdec_s * vdec)2183 int vdec_destroy(struct vdec_s *vdec)
2184 {
2185 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
2186
2187 vdec_input_release(&vdec->input);
2188
2189 #ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2190 vdec_profile_flush(vdec);
2191 #endif
2192 ida_simple_remove(&vdec_core->ida, vdec->id);
2193 if (vdec->mvfrm)
2194 vfree(vdec->mvfrm);
2195 vfree(vdec);
2196
2197 #ifdef CONFIG_AMLOGIC_V4L_VIDEO3
2198 v4lvideo_dec_count_decrease();
2199 #endif
2200 atomic_dec(&vdec_core->vdec_nr);
2201
2202 return 0;
2203 }
2204 EXPORT_SYMBOL(vdec_destroy);
2205
2206 /*
2207 *register vdec_device
2208 * create output, vfm or create ionvideo output
2209 */
vdec_init(struct vdec_s * vdec,int is_4k)2210 s32 vdec_init(struct vdec_s *vdec, int is_4k)
2211 {
2212 int r = 0;
2213 struct vdec_s *p = vdec;
2214 const char *dev_name;
2215 int id = PLATFORM_DEVID_AUTO;/*if have used my self*/
2216
2217 //pr_err("%s [pid=%d,tgid=%d]\n", __func__, current->pid, current->tgid);
2218 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
2219
2220 if (dev_name == NULL)
2221 return -ENODEV;
2222
2223 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
2224 dev_name, vdec_type_str(vdec));
2225
2226 /*
2227 *todo: VFM patch control should be configurable,
2228 * for now all stream based input uses default VFM path.
2229 */
2230 if (!enable_stream_mode_multi_dec) {
2231 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
2232 if (vdec_core->vfm_vdec == NULL) {
2233 pr_debug("vdec_init set vfm decoder %p\n", vdec);
2234 vdec_core->vfm_vdec = vdec;
2235 } else {
2236 pr_info("vdec_init vfm path busy.\n");
2237 return -EBUSY;
2238 }
2239 }
2240 }
2241
2242 mutex_lock(&vdec_mutex);
2243 inited_vcodec_num++;
2244 mutex_unlock(&vdec_mutex);
2245
2246 vdec_input_set_type(&vdec->input, vdec->type,
2247 (vdec->format == VFORMAT_HEVC ||
2248 vdec->format == VFORMAT_AVS2 ||
2249 vdec->format == VFORMAT_VP9 ||
2250 vdec->format == VFORMAT_AV1
2251 ) ?
2252 VDEC_INPUT_TARGET_HEVC :
2253 VDEC_INPUT_TARGET_VLD);
2254 if (vdec_single(vdec) || (vdec_get_debug_flags() & 0x2))
2255 vdec_enable_DMC(vdec);
2256 p->cma_dev = vdec_core->cma_dev;
2257 p->get_canvas = get_canvas;
2258 p->get_canvas_ex = get_canvas_ex;
2259 p->free_canvas_ex = free_canvas_ex;
2260 p->vdec_fps_detec = vdec_fps_detec;
2261 atomic_set(&p->inrelease, 0);
2262 atomic_set(&p->inirq_flag, 0);
2263 atomic_set(&p->inirq_thread_flag, 0);
2264 /* todo */
2265 if (!vdec_dual(vdec)) {
2266 p->use_vfm_path =
2267 enable_stream_mode_multi_dec ?
2268 vdec_single(vdec) :
2269 vdec_stream_based(vdec);
2270 }
2271
2272 if (debugflags & 0x4)
2273 p->use_vfm_path = 1;
2274 /* vdec_dev_reg.flag = 0; */
2275 if (vdec->id >= 0)
2276 id = vdec->id;
2277 p->parallel_dec = parallel_decode;
2278 vdec_core->parallel_dec = parallel_decode;
2279 vdec->canvas_mode = CANVAS_BLKMODE_32X32;
2280 #ifdef FRAME_CHECK
2281 vdec_frame_check_init(vdec);
2282 #endif
2283 /* stream buffer init. */
2284 if (vdec->vbuf.ops && !vdec->master) {
2285 r = vdec->vbuf.ops->init(&vdec->vbuf, vdec);
2286 if (r) {
2287 pr_err("%s stream buffer init err (%d)\n", dev_name, r);
2288
2289 mutex_lock(&vdec_mutex);
2290 inited_vcodec_num--;
2291 mutex_unlock(&vdec_mutex);
2292
2293 goto error;
2294 }
2295
2296 if (vdec->slave) {
2297 memcpy(&vdec->slave->vbuf, &vdec->vbuf,
2298 sizeof(vdec->vbuf));
2299 }
2300 }
2301
2302 p->dev = platform_device_register_data(
2303 &vdec_core->vdec_core_platform_device->dev,
2304 dev_name,
2305 id,
2306 &p, sizeof(struct vdec_s *));
2307
2308 if (IS_ERR(p->dev)) {
2309 r = PTR_ERR(p->dev);
2310 pr_err("vdec: Decoder device %s register failed (%d)\n",
2311 dev_name, r);
2312
2313 mutex_lock(&vdec_mutex);
2314 inited_vcodec_num--;
2315 mutex_unlock(&vdec_mutex);
2316
2317 goto error;
2318 } else if (!p->dev->dev.driver) {
2319 pr_info("vdec: Decoder device %s driver probe failed.\n",
2320 dev_name);
2321 r = -ENODEV;
2322
2323 goto error;
2324 }
2325
2326 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
2327 r = -ENODEV;
2328 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
2329
2330 mutex_lock(&vdec_mutex);
2331 inited_vcodec_num--;
2332 mutex_unlock(&vdec_mutex);
2333
2334 goto error;
2335 }
2336
2337 if (p->use_vfm_path) {
2338 vdec->vf_receiver_inst = -1;
2339 vdec->vfm_map_id[0] = 0;
2340 } else if (!vdec_dual(vdec)) {
2341 /* create IONVIDEO instance and connect decoder's
2342 * vf_provider interface to it
2343 */
2344 if (!enable_stream_mode_multi_dec) {
2345 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
2346 r = -ENODEV;
2347 pr_err("vdec: Incorrect decoder type\n");
2348
2349 mutex_lock(&vdec_mutex);
2350 inited_vcodec_num--;
2351 mutex_unlock(&vdec_mutex);
2352
2353 goto error;
2354 }
2355 }
2356
2357 if (strncmp("disable", vfm_path, strlen("disable"))) {
2358 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2359 "%s %s", vdec->vf_provider_name, vfm_path);
2360 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2361 "vdec-map-%d", vdec->id);
2362 } else if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
2363 #if 1
2364 r = ionvideo_assign_map(&vdec->vf_receiver_name,
2365 &vdec->vf_receiver_inst);
2366 #else
2367 /*
2368 * temporarily just use decoder instance ID as iondriver ID
2369 * to solve OMX iondriver instance number check time sequence
2370 * only the limitation is we can NOT mix different video
2371 * decoders since same ID will be used for different decoder
2372 * formats.
2373 */
2374 vdec->vf_receiver_inst = p->dev->id;
2375 r = ionvideo_assign_map(&vdec->vf_receiver_name,
2376 &vdec->vf_receiver_inst);
2377 #endif
2378 if (r < 0) {
2379 pr_err("IonVideo frame receiver allocation failed.\n");
2380
2381 mutex_lock(&vdec_mutex);
2382 inited_vcodec_num--;
2383 mutex_unlock(&vdec_mutex);
2384
2385 goto error;
2386 }
2387
2388 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2389 "%s %s", vdec->vf_provider_name,
2390 vdec->vf_receiver_name);
2391 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2392 "vdec-map-%d", vdec->id);
2393 } else if (p->frame_base_video_path ==
2394 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
2395 if (vdec_secure(vdec)) {
2396 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2397 "%s %s", vdec->vf_provider_name,
2398 "amlvideo amvideo");
2399 } else {
2400 if (debug_vdetect)
2401 snprintf(vdec->vfm_map_chain,
2402 VDEC_MAP_NAME_SIZE,
2403 "%s vdetect.0 %s",
2404 vdec->vf_provider_name,
2405 "amlvideo ppmgr deinterlace amvideo");
2406 else
2407 snprintf(vdec->vfm_map_chain,
2408 VDEC_MAP_NAME_SIZE, "%s %s",
2409 vdec->vf_provider_name,
2410 "amlvideo ppmgr deinterlace amvideo");
2411 }
2412 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2413 "vdec-map-%d", vdec->id);
2414 } else if (p->frame_base_video_path ==
2415 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
2416 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2417 "%s %s", vdec->vf_provider_name,
2418 "aml_video.1 videosync.0 videopip");
2419 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2420 "vdec-map-%d", vdec->id);
2421 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_OSD) {
2422 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2423 "%s %s", vdec->vf_provider_name,
2424 vdec->vf_receiver_name);
2425 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2426 "vdec-map-%d", vdec->id);
2427 } else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) {
2428 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2429 "%s %s", vdec->vf_provider_name,
2430 "amvideo");
2431 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2432 "vdec-map-%d", vdec->id);
2433 } else if (p->frame_base_video_path == FRAME_BASE_PATH_PIP_TUNNEL_MODE) {
2434 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2435 "%s %s", vdec->vf_provider_name,
2436 "videosync.0 videopip");
2437 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2438 "vdec-map-%d", vdec->id);
2439 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
2440 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2441 "%s %s %s", vdec->vf_provider_name,
2442 vdec->vf_receiver_name, "amvideo");
2443 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2444 "vdec-map-%d", vdec->id);
2445 } else if (p->frame_base_video_path ==
2446 FRAME_BASE_PATH_DI_V4LVIDEO) {
2447 #ifdef CONFIG_AMLOGIC_V4L_VIDEO3
2448 r = v4lvideo_assign_map(&vdec->vf_receiver_name,
2449 &vdec->vf_receiver_inst);
2450 #else
2451 r = -1;
2452 #endif
2453 if (r < 0) {
2454 pr_err("V4lVideo frame receiver allocation failed.\n");
2455 mutex_lock(&vdec_mutex);
2456 inited_vcodec_num--;
2457 mutex_unlock(&vdec_mutex);
2458 goto error;
2459 }
2460 if (!v4lvideo_add_di)
2461 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2462 "%s %s", vdec->vf_provider_name,
2463 vdec->vf_receiver_name);
2464 else {
2465 if ((vdec->vf_receiver_inst == 0)
2466 && (max_di_instance > 0))
2467 if (max_di_instance == 1)
2468 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2469 "%s %s %s", vdec->vf_provider_name,
2470 "deinterlace",
2471 vdec->vf_receiver_name);
2472 else
2473 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2474 "%s %s %s", vdec->vf_provider_name,
2475 "dimulti.1",
2476 vdec->vf_receiver_name);
2477 else if ((vdec->vf_receiver_inst <
2478 max_di_instance) &&
2479 (vdec->vf_receiver_inst == 1))
2480 snprintf(vdec->vfm_map_chain,
2481 VDEC_MAP_NAME_SIZE,
2482 "%s %s %s",
2483 vdec->vf_provider_name,
2484 "deinterlace",
2485 vdec->vf_receiver_name);
2486 else if (vdec->vf_receiver_inst <
2487 max_di_instance)
2488 snprintf(vdec->vfm_map_chain,
2489 VDEC_MAP_NAME_SIZE,
2490 "%s %s%d %s",
2491 vdec->vf_provider_name,
2492 "dimulti.",
2493 vdec->vf_receiver_inst,
2494 vdec->vf_receiver_name);
2495 else
2496 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2497 "%s %s", vdec->vf_provider_name,
2498 vdec->vf_receiver_name);
2499 }
2500 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2501 "vdec-map-%d", vdec->id);
2502 }
2503
2504 if (vfm_map_add(vdec->vfm_map_id,
2505 vdec->vfm_map_chain) < 0) {
2506 r = -ENOMEM;
2507 pr_err("Decoder pipeline map creation failed %s.\n",
2508 vdec->vfm_map_id);
2509 vdec->vfm_map_id[0] = 0;
2510
2511 mutex_lock(&vdec_mutex);
2512 inited_vcodec_num--;
2513 mutex_unlock(&vdec_mutex);
2514
2515 goto error;
2516 }
2517
2518 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
2519
2520 /*
2521 *assume IONVIDEO driver already have a few vframe_receiver
2522 * registered.
2523 * 1. Call iondriver function to allocate a IONVIDEO path and
2524 * provide receiver's name and receiver op.
2525 * 2. Get decoder driver's provider name from driver instance
2526 * 3. vfm_map_add(name, "<decoder provider name>
2527 * <iondriver receiver name>"), e.g.
2528 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
2529 * 4. vf_reg_provider and vf_reg_receiver
2530 * Note: the decoder provider's op uses vdec as op_arg
2531 * the iondriver receiver's op uses iondev device as
2532 * op_arg
2533 */
2534
2535 }
2536
2537 if (!vdec_single(vdec)) {
2538 vf_reg_provider(&p->vframe_provider);
2539
2540 vf_notify_receiver(p->vf_provider_name,
2541 VFRAME_EVENT_PROVIDER_START,
2542 vdec);
2543
2544 if (vdec_core->hint_fr_vdec == NULL)
2545 vdec_core->hint_fr_vdec = vdec;
2546
2547 if (vdec_core->hint_fr_vdec == vdec) {
2548 if (p->sys_info->rate != 0) {
2549 if (!vdec->is_reset) {
2550 vf_notify_receiver(p->vf_provider_name,
2551 VFRAME_EVENT_PROVIDER_FR_HINT,
2552 (void *)
2553 ((unsigned long)
2554 p->sys_info->rate));
2555 vdec->fr_hint_state = VDEC_HINTED;
2556 }
2557 } else {
2558 vdec->fr_hint_state = VDEC_NEED_HINT;
2559 }
2560 }
2561 }
2562
2563 p->dolby_meta_with_el = 0;
2564 pr_debug("vdec_init, vf_provider_name = %s, b %d\n",
2565 p->vf_provider_name, is_cpu_tm2_revb());
2566 vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
2567 &vdec->input,
2568 vdec->sys_info->width,
2569 vdec->sys_info->height);
2570 /* vdec is now ready to be active */
2571 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
2572 return 0;
2573
2574 error:
2575 return r;
2576 }
2577 EXPORT_SYMBOL(vdec_init);
2578
2579 /*
2580 *Remove the vdec after timeout happens both in vdec_disconnect
2581 *and platform_device_unregister. Then after, we can release the vdec.
2582 */
vdec_connect_list_force_clear(struct vdec_core_s * core,struct vdec_s * v_ref)2583 static void vdec_connect_list_force_clear(struct vdec_core_s *core, struct vdec_s *v_ref)
2584 {
2585 struct vdec_s *vdec, *tmp;
2586 unsigned long flags;
2587
2588 flags = vdec_core_lock(core);
2589
2590 list_for_each_entry_safe(vdec, tmp,
2591 &core->connected_vdec_list, list) {
2592 if ((vdec->status == VDEC_STATUS_DISCONNECTED) &&
2593 (vdec == v_ref)) {
2594 pr_err("%s, vdec = %p, active vdec = %p\n",
2595 __func__, vdec, core->active_vdec);
2596 if (core->active_vdec == v_ref)
2597 core->active_vdec = NULL;
2598 if (core->last_vdec == v_ref)
2599 core->last_vdec = NULL;
2600 list_del(&vdec->list);
2601 }
2602 }
2603
2604 vdec_core_unlock(core, flags);
2605 }
2606
2607 /* vdec_create/init/release/destroy are applied to both dual running decoders
2608 */
/*
 * Tear down a decoder instance: disconnect it from the core scheduler,
 * unregister its vframe provider and vfm map, drain in-flight irq
 * handlers, unregister the platform device and finally free the vdec.
 * The teardown order below is significant — do not reorder.
 */
void vdec_release(struct vdec_s *vdec)
{
	//trace_vdec_release(vdec);/*DEBUG_TMP*/
#ifdef VDEC_DEBUG_SUPPORT
	/* debug single-stepping holds the instance alive until cleared */
	if (step_mode) {
		pr_info("VDEC_DEBUG: in step_mode, wait release\n");
		while (step_mode)
			udelay(10);
		pr_info("VDEC_DEBUG: step_mode is clear\n");
	}
#endif
	vdec_disconnect(vdec);

	if (vdec->vframe_provider.name) {
		if (!vdec_single(vdec)) {
			/* if this instance issued the frame-rate hint,
			 * signal its end before unregistering
			 */
			if (vdec_core->hint_fr_vdec == vdec
			&& vdec->fr_hint_state == VDEC_HINTED)
				vf_notify_receiver(
					vdec->vf_provider_name,
					VFRAME_EVENT_PROVIDER_FR_END_HINT,
					NULL);
			vdec->fr_hint_state = VDEC_NO_NEED_HINT;
		}
		vf_unreg_provider(&vdec->vframe_provider);
	}

	/* drop core-level references to this instance */
	if (vdec_core->vfm_vdec == vdec)
		vdec_core->vfm_vdec = NULL;

	if (vdec_core->hint_fr_vdec == vdec)
		vdec_core->hint_fr_vdec = NULL;

	if (vdec->vf_receiver_inst >= 0) {
		if (vdec->vfm_map_id[0]) {
			vfm_map_remove(vdec->vfm_map_id);
			vdec->vfm_map_id[0] = 0;
		}
	}

	/* block new isr entry (checked in vdec_isr/vdec_thread_isr) and
	 * spin until any handler already running has left
	 */
	atomic_set(&vdec->inrelease, 1);
	while ((atomic_read(&vdec->inirq_flag) > 0)
		|| (atomic_read(&vdec->inirq_thread_flag) > 0))
		schedule();

#ifdef FRAME_CHECK
	vdec_frame_check_exit(vdec);
#endif
	vdec_fps_clear(vdec->id);
	/* last instance going away: release the DMC pipeline */
	if (atomic_read(&vdec_core->vdec_nr) == 1)
		vdec_disable_DMC(vdec);
	platform_device_unregister(vdec->dev);
	/*Check if the vdec still in connected list, if yes, delete it*/
	vdec_connect_list_force_clear(vdec_core, vdec);

	/* stream buffer is owned by the master of a dual pair */
	if (vdec->vbuf.ops && !vdec->master)
		vdec->vbuf.ops->release(&vdec->vbuf);

	pr_debug("vdec_release instance %p, total %d\n", vdec,
		atomic_read(&vdec_core->vdec_nr));
	vdec_destroy(vdec);

	mutex_lock(&vdec_mutex);
	inited_vcodec_num--;
	mutex_unlock(&vdec_mutex);

}
EXPORT_SYMBOL(vdec_release);
2676
2677 /* For dual running decoders, vdec_reset is only called with master vdec.
2678 */
/*
 * Fully restart a decoder instance in place: disconnect it, unregister
 * its vframe provider(s), invoke the driver reset hook, rebuild the
 * input context, then re-register and re-connect.  Always returns 0.
 */
int vdec_reset(struct vdec_s *vdec)
{
	//trace_vdec_reset(vdec); /*DEBUG_TMP*/

	vdec_disconnect(vdec);

	if (vdec->vframe_provider.name)
		vf_unreg_provider(&vdec->vframe_provider);

	if ((vdec->slave) && (vdec->slave->vframe_provider.name))
		vf_unreg_provider(&vdec->slave->vframe_provider);

	if (vdec->reset) {
		vdec->reset(vdec);
		/* NOTE(review): slave->reset is called unguarded here —
		 * presumably a dual pair always shares reset support;
		 * confirm slave->reset cannot be NULL when vdec->reset isn't.
		 */
		if (vdec->slave)
			vdec->slave->reset(vdec->slave);
	}
	vdec->mc_loaded = 0;/*clear for reload firmware*/
	vdec_input_release(&vdec->input);

	vdec_input_init(&vdec->input, vdec);

	vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
		vdec->sys_info->height);

	/* re-register providers and restart the receivers */
	vf_reg_provider(&vdec->vframe_provider);
	vf_notify_receiver(vdec->vf_provider_name,
		VFRAME_EVENT_PROVIDER_START, vdec);

	if (vdec->slave) {
		vf_reg_provider(&vdec->slave->vframe_provider);
		vf_notify_receiver(vdec->slave->vf_provider_name,
			VFRAME_EVENT_PROVIDER_START, vdec->slave);
		vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
	}

	vdec_connect(vdec);

	return 0;
}
EXPORT_SYMBOL(vdec_reset);
2720
/*
 * V4L2 variant of vdec_reset().
 * @flag != 2: full reset — unregister providers, reset the driver,
 *             rebuild the input context and re-register (same sequence
 *             as vdec_reset()).
 * @flag == 2: light reset — only invoke the driver reset hook, keeping
 *             providers and the input context intact.
 * Always re-connects the instance and re-arms frame checking; returns 0.
 */
int vdec_v4l2_reset(struct vdec_s *vdec, int flag)
{
	//trace_vdec_reset(vdec); /*DEBUG_TMP*/
	pr_debug("vdec_v4l2_reset %d\n", flag);
	vdec_disconnect(vdec);
	if (flag != 2) {
		if (vdec->vframe_provider.name)
			vf_unreg_provider(&vdec->vframe_provider);

		if ((vdec->slave) && (vdec->slave->vframe_provider.name))
			vf_unreg_provider(&vdec->slave->vframe_provider);

		if (vdec->reset) {
			vdec->reset(vdec);
			if (vdec->slave)
				vdec->slave->reset(vdec->slave);
		}
		vdec->mc_loaded = 0;/*clear for reload firmware*/

		vdec_input_release(&vdec->input);

		vdec_input_init(&vdec->input, vdec);

		vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
			vdec->sys_info->height);

		vf_reg_provider(&vdec->vframe_provider);
		vf_notify_receiver(vdec->vf_provider_name,
			VFRAME_EVENT_PROVIDER_START, vdec);

		if (vdec->slave) {
			vf_reg_provider(&vdec->slave->vframe_provider);
			vf_notify_receiver(vdec->slave->vf_provider_name,
				VFRAME_EVENT_PROVIDER_START, vdec->slave);
			vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
		}
	} else {
		/* flag == 2: driver-level reset only */
		if (vdec->reset) {
			vdec->reset(vdec);
			if (vdec->slave)
				vdec->slave->reset(vdec->slave);
		}
	}

	vdec_connect(vdec);

	vdec_frame_check_init(vdec);

	return 0;
}
EXPORT_SYMBOL(vdec_v4l2_reset);
2772
2773
vdec_free_cmabuf(void)2774 void vdec_free_cmabuf(void)
2775 {
2776 mutex_lock(&vdec_mutex);
2777
2778 /*if (inited_vcodec_num > 0) {
2779 mutex_unlock(&vdec_mutex);
2780 return;
2781 }*/
2782 mutex_unlock(&vdec_mutex);
2783 }
2784
vdec_core_request(struct vdec_s * vdec,unsigned long mask)2785 void vdec_core_request(struct vdec_s *vdec, unsigned long mask)
2786 {
2787 vdec->core_mask |= mask;
2788
2789 if (vdec->slave)
2790 vdec->slave->core_mask |= mask;
2791 if (vdec_core->parallel_dec == 1) {
2792 if (mask & CORE_MASK_COMBINE)
2793 vdec_core->vdec_combine_flag++;
2794 }
2795
2796 }
2797 EXPORT_SYMBOL(vdec_core_request);
2798
vdec_core_release(struct vdec_s * vdec,unsigned long mask)2799 int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
2800 {
2801 vdec->core_mask &= ~mask;
2802
2803 if (vdec->slave)
2804 vdec->slave->core_mask &= ~mask;
2805 if (vdec_core->parallel_dec == 1) {
2806 if (mask & CORE_MASK_COMBINE)
2807 vdec_core->vdec_combine_flag--;
2808 }
2809 return 0;
2810 }
2811 EXPORT_SYMBOL(vdec_core_release);
2812
vdec_core_with_input(unsigned long mask)2813 bool vdec_core_with_input(unsigned long mask)
2814 {
2815 enum vdec_type_e type;
2816
2817 for (type = VDEC_1; type < VDEC_MAX; type++) {
2818 if ((mask & (1 << type)) && cores_with_input[type])
2819 return true;
2820 }
2821
2822 return false;
2823 }
2824
vdec_core_finish_run(struct vdec_s * vdec,unsigned long mask)2825 void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
2826 {
2827 unsigned long i;
2828 unsigned long t = mask;
2829 mutex_lock(&vdec_mutex);
2830 while (t) {
2831 i = __ffs(t);
2832 clear_bit(i, &vdec->active_mask);
2833 t &= ~(1 << i);
2834 }
2835
2836 if (vdec->active_mask == 0)
2837 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
2838
2839 mutex_unlock(&vdec_mutex);
2840 }
2841 EXPORT_SYMBOL(vdec_core_finish_run);
2842 /*
2843 * find what core resources are available for vdec
2844 */
vdec_schedule_mask(struct vdec_s * vdec,unsigned long active_mask)2845 static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
2846 unsigned long active_mask)
2847 {
2848 unsigned long mask = vdec->core_mask &
2849 ~CORE_MASK_COMBINE;
2850
2851 if (vdec->core_mask & CORE_MASK_COMBINE) {
2852 /* combined cores must be granted together */
2853 if ((mask & ~active_mask) == mask)
2854 return mask;
2855 else
2856 return 0;
2857 } else
2858 return mask & ~vdec->sched_mask & ~active_mask;
2859 }
2860
2861 /*
2862 *Decoder callback
2863 * Each decoder instance uses this callback to notify status change, e.g. when
2864 * decoder finished using HW resource.
2865 * a sample callback from decoder's driver is following:
2866 *
2867 * if (hw->vdec_cb) {
2868 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
2869 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
2870 * }
2871 */
vdec_callback(struct vdec_s * vdec,void * data)2872 static void vdec_callback(struct vdec_s *vdec, void *data)
2873 {
2874 struct vdec_core_s *core = (struct vdec_core_s *)data;
2875
2876 #ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2877 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
2878 #endif
2879
2880 up(&core->sem);
2881 }
2882
/*
 * Top-half interrupt handler shared by all vdec irq contexts.
 * Resolves the interrupt to the owning vdec instance (per-core in
 * parallel-decode mode, otherwise the last scheduled one), marks it as
 * "inside irq" so vdec_release() can drain safely, then dispatches to
 * either the context's device isr or the instance's irq_handler.
 */
static irqreturn_t vdec_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	/* parallel mode: map the irq line to the core's active instance */
	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		/* instance is being released: ignore, and do NOT set
		 * inirq_flag — release is already past the drain loop
		 */
		if (atomic_read(&vdec->inrelease) > 0)
			return ret;
		atomic_set(&vdec->inirq_flag, 1);
	}
	if (c->dev_isr) {
		ret = c->dev_isr(irq, c->dev_id);
		goto isr_done;
	}

	/* only the three well-known contexts may reach the instance path */
	if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
	(c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
	(c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
#if 0
		pr_warn("vdec interrupt w/o a valid receiver\n");
#endif
		goto isr_done;
	}

	if (!vdec) {
#if 0
		pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
			core);
#endif
		goto isr_done;
	}

	if (!vdec->irq_handler) {
#if 0
		pr_warn("vdec instance has no irq handle.\n");
#endif
		goto isr_done;
	}

	ret = vdec->irq_handler(vdec, c->index);
isr_done:
	/* clear the in-irq marker watched by vdec_release() */
	if (vdec)
		atomic_set(&vdec->inirq_flag, 0);
	return ret;
}
2939
/*
 * Threaded (bottom-half) counterpart of vdec_isr(): same instance
 * resolution and release-drain handshake, but uses inirq_thread_flag
 * and dispatches to dev_threaded_isr / threaded_irq_handler.
 */
static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	/* parallel mode: map the irq line to the core's active instance */
	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		/* instance is being torn down: bail out without marking */
		if (atomic_read(&vdec->inrelease) > 0)
			return ret;
		atomic_set(&vdec->inirq_thread_flag, 1);
	}
	if (c->dev_threaded_isr) {
		ret = c->dev_threaded_isr(irq, c->dev_id);
		goto thread_isr_done;
	}
	if (!vdec)
		goto thread_isr_done;

	if (!vdec->threaded_irq_handler)
		goto thread_isr_done;
	ret = vdec->threaded_irq_handler(vdec, c->index);
thread_isr_done:
	/* clear the marker watched by vdec_release()'s drain loop */
	if (vdec)
		atomic_set(&vdec->inirq_thread_flag, 0);
	return ret;
}
2976
/*
 * Scheduling gate used by the core thread: given candidate cores in
 * @mask, return the subset this instance can actually run on now, or
 * 0/false when it must not be scheduled (wrong state, crc error block,
 * input underrun, stream buffer below prepare level, or the driver's
 * own run_ready() veto).  Note: returns "false" (0) for all the early
 * rejections even though the return type is unsigned long.
 */
unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
{
	unsigned long ready_mask;
	struct vdec_input_s *input = &vdec->input;

	/* only connected/active instances are schedulable */
	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
	    (vdec->status != VDEC_STATUS_ACTIVE))
		return false;

	if (!vdec->run_ready)
		return false;

	/* when crc32 error, block at error frame */
	if (vdec->vfc.err_crc_block)
		return false;

	/* a dual (DV) instance must first be granted a schedule slot */
	if ((vdec->slave || vdec->master) &&
		(vdec->sched == 0))
		return false;
#ifdef VDEC_DEBUG_SUPPORT
	inc_profi_count(mask, vdec->check_count);
#endif
	if (vdec_core_with_input(mask)) {

		/* check frame based input underrun */
		if (input && !input->eos && input_frame_based(input)
			&& (!vdec_input_next_chunk(input))) {
#ifdef VDEC_DEBUG_SUPPORT
			inc_profi_count(mask, vdec->input_underrun_count);
#endif
			return false;
		}
		/* check streaming prepare level threshold if not EOS */
		if (input && input_stream_based(input) && !input->eos) {
			u32 rp, wp, level;

			/* current fill level of the ring buffer */
			rp = STBUF_READ(&vdec->vbuf, get_rp);
			wp = STBUF_READ(&vdec->vbuf, get_wp);
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;

			/* below threshold with <2 pending PTS records:
			 * starve — ask the writer for more data
			 */
			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				vdec->need_more_data |= VDEC_NEED_MORE_DATA;
#ifdef VDEC_DEBUG_SUPPORT
				inc_profi_count(mask, vdec->input_underrun_count);
				if (step_mode & 0x200) {
					if ((step_mode & 0xff) == vdec->id) {
						step_mode |= 0xff;
						return mask;
					}
				}
#endif
				return false;
			} else if (level > input->prepare_level)
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
		}
	}

	/* debug single-step: only the selected id may proceed */
	if (step_mode) {
		if ((step_mode & 0xff) != vdec->id)
			return 0;
		step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
	}

	/*step_mode &= ~0xff; not work for id of 0, removed*/

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
#endif

	/* final say belongs to the decoder driver */
	ready_mask = vdec->run_ready(vdec, mask) & mask;
#ifdef VDEC_DEBUG_SUPPORT
	if (ready_mask != mask)
		inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
#endif
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (ready_mask)
		vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
#endif

	return ready_mask;
}
3062
3063 /* bridge on/off vdec's interrupt processing to vdec core */
static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
	bool enable)
{
	enum vdec_type_e t;

	for (t = VDEC_1; t < VDEC_MAX; t++) {
		struct vdec_isr_context_s *ctx;

		if (!(mask & (1 << t)))
			continue;

		ctx = &vdec_core->isr_context[cores_int[t]];
		if (enable)
			ctx->vdec = vdec;
		else if (ctx->vdec == vdec)
			/* only detach if we are still the routed instance */
			ctx->vdec = NULL;
	}
}
3080
3081 /*
3082 * Set up secure protection for each decoder instance running.
3083 * Note: The operation from REE side only resets memory access
3084 * to a default policy and even a non_secure type will still be
3085 * changed to secure type automatically when secure source is
3086 * detected inside TEE.
3087 * Perform need_more_data checking and set flag is decoder
3088 * is not consuming data.
3089 */
void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
{
	struct vdec_input_s *input = &vdec->input;
	/* default REE-side access policy; TEE may still escalate to
	 * secure when protected content is detected (see header comment)
	 */
	int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
			DMC_DEV_TYPE_NON_SECURE;

	vdec_route_interrupt(vdec, mask, true);

	/* remaining steps only apply to cores that have an input side */
	if (!vdec_core_with_input(mask))
		return;

	/* debug override: force DRM stream-based input to non-secure */
	if (secure && vdec_stream_based(vdec) && force_nosecure_even_drm)
	{
		secure = 0;
	}
	if (input->target == VDEC_INPUT_TARGET_VLD)
		tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
	else if (input->target == VDEC_INPUT_TARGET_HEVC)
		tee_config_device_secure(DMC_DEV_ID_HEVC, secure);

	/* if the previous run consumed no data (RUN set, DIRTY never
	 * set), flag that more input is needed before the next run
	 */
	if (vdec_stream_based(vdec) &&
		((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
		(vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
		vdec->need_more_data |= VDEC_NEED_MORE_DATA;
	}

	/* arm the RUN/DIRTY pair for the upcoming run */
	vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
	vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
}
3119
3120
3121 /* struct vdec_core_shread manages all decoder instance in active list. When
3122 * a vdec is added into the active list, it can onlt be in two status:
3123 * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run)
3124 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
3125 * Removing a decoder from active list is only performed within core thread.
3126 * Adding a decoder into active list is performed from user thread.
3127 */
vdec_core_thread(void * data)3128 static int vdec_core_thread(void *data)
3129 {
3130 struct vdec_core_s *core = (struct vdec_core_s *)data;
3131 struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
3132 unsigned long flags;
3133 int i;
3134
3135 //sched_setscheduler(current, SCHED_FIFO, ¶m); //FIXME
3136
3137 allow_signal(SIGTERM);
3138
3139 while (down_interruptible(&core->sem) == 0) {
3140 struct vdec_s *vdec, *tmp, *worker;
3141 unsigned long sched_mask = 0;
3142 LIST_HEAD(disconnecting_list);
3143
3144 if (kthread_should_stop())
3145 break;
3146 mutex_lock(&vdec_mutex);
3147
3148 if (core->parallel_dec == 1) {
3149 for (i = VDEC_1; i < VDEC_MAX; i++) {
3150 core->power_ref_mask =
3151 core->power_ref_count[i] > 0 ?
3152 (core->power_ref_mask | (1 << i)) :
3153 (core->power_ref_mask & ~(1 << i));
3154 }
3155 }
3156 /* clean up previous active vdec's input */
3157 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3158 unsigned long mask = vdec->sched_mask &
3159 (vdec->active_mask ^ vdec->sched_mask);
3160
3161 vdec_route_interrupt(vdec, mask, false);
3162
3163 #ifdef VDEC_DEBUG_SUPPORT
3164 update_profi_clk_stop(vdec, mask, get_current_clk());
3165 #endif
3166 /*
3167 * If decoder released some core resources (mask), then
3168 * check if these core resources are associated
3169 * with any input side and do input clean up accordingly
3170 */
3171 if (vdec_core_with_input(mask)) {
3172 struct vdec_input_s *input = &vdec->input;
3173 while (!list_empty(
3174 &input->vframe_chunk_list)) {
3175 struct vframe_chunk_s *chunk =
3176 vdec_input_next_chunk(input);
3177 if (chunk && (chunk->flag &
3178 VFRAME_CHUNK_FLAG_CONSUMED))
3179 vdec_input_release_chunk(input,
3180 chunk);
3181 else
3182 break;
3183 }
3184
3185 vdec_save_input_context(vdec);
3186 }
3187
3188 vdec->sched_mask &= ~mask;
3189 core->sched_mask &= ~mask;
3190 }
3191 vdec_update_buff_status();
3192 /*
3193 *todo:
3194 * this is the case when the decoder is in active mode and
3195 * the system side wants to stop it. Currently we rely on
3196 * the decoder instance to go back to VDEC_STATUS_CONNECTED
3197 * from VDEC_STATUS_ACTIVE by its own. However, if for some
3198 * reason the decoder can not exist by itself (dead decoding
3199 * or whatever), then we may have to add another vdec API
3200 * to kill the vdec and release its HW resource and make it
3201 * become inactive again.
3202 * if ((core->active_vdec) &&
3203 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
3204 * }
3205 */
3206
3207 /* check disconnected decoders */
3208 flags = vdec_core_lock(vdec_core);
3209 list_for_each_entry_safe(vdec, tmp,
3210 &core->connected_vdec_list, list) {
3211 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
3212 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
3213 if (core->parallel_dec == 1) {
3214 if (vdec_core->active_hevc == vdec)
3215 vdec_core->active_hevc = NULL;
3216 if (vdec_core->active_vdec == vdec)
3217 vdec_core->active_vdec = NULL;
3218 }
3219 if (core->last_vdec == vdec)
3220 core->last_vdec = NULL;
3221 list_move(&vdec->list, &disconnecting_list);
3222 }
3223 }
3224 vdec_core_unlock(vdec_core, flags);
3225 mutex_unlock(&vdec_mutex);
3226 /* elect next vdec to be scheduled */
3227 vdec = core->last_vdec;
3228 if (vdec) {
3229 vdec = list_entry(vdec->list.next, struct vdec_s, list);
3230 list_for_each_entry_from(vdec,
3231 &core->connected_vdec_list, list) {
3232 sched_mask = vdec_schedule_mask(vdec,
3233 core->sched_mask);
3234 if (!sched_mask)
3235 continue;
3236 sched_mask = vdec_ready_to_run(vdec,
3237 sched_mask);
3238 if (sched_mask)
3239 break;
3240 }
3241
3242 if (&vdec->list == &core->connected_vdec_list)
3243 vdec = NULL;
3244 }
3245
3246 if (!vdec) {
3247 /* search from beginning */
3248 list_for_each_entry(vdec,
3249 &core->connected_vdec_list, list) {
3250 sched_mask = vdec_schedule_mask(vdec,
3251 core->sched_mask);
3252 if (vdec == core->last_vdec) {
3253 if (!sched_mask) {
3254 vdec = NULL;
3255 break;
3256 }
3257
3258 sched_mask = vdec_ready_to_run(vdec,
3259 sched_mask);
3260
3261 if (!sched_mask) {
3262 vdec = NULL;
3263 break;
3264 }
3265 break;
3266 }
3267
3268 if (!sched_mask)
3269 continue;
3270
3271 sched_mask = vdec_ready_to_run(vdec,
3272 sched_mask);
3273 if (sched_mask)
3274 break;
3275 }
3276
3277 if (&vdec->list == &core->connected_vdec_list)
3278 vdec = NULL;
3279 }
3280
3281 worker = vdec;
3282
3283 if (vdec) {
3284 unsigned long mask = sched_mask;
3285 unsigned long i;
3286
3287 /* setting active_mask should be atomic.
3288 * it can be modified by decoder driver callbacks.
3289 */
3290 while (sched_mask) {
3291 i = __ffs(sched_mask);
3292 set_bit(i, &vdec->active_mask);
3293 sched_mask &= ~(1 << i);
3294 }
3295
3296 /* vdec's sched_mask is only set from core thread */
3297 vdec->sched_mask |= mask;
3298 if (core->last_vdec) {
3299 if ((core->last_vdec != vdec) &&
3300 (core->last_vdec->mc_type != vdec->mc_type))
3301 vdec->mc_loaded = 0;/*clear for reload firmware*/
3302 } else
3303 vdec->mc_loaded = 0;
3304 core->last_vdec = vdec;
3305 if (debug & 2)
3306 vdec->mc_loaded = 0;/*alway reload firmware*/
3307 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
3308
3309 core->sched_mask |= mask;
3310 if (core->parallel_dec == 1)
3311 vdec_save_active_hw(vdec);
3312 #ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
3313 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
3314 #endif
3315 vdec_prepare_run(vdec, mask);
3316 #ifdef VDEC_DEBUG_SUPPORT
3317 inc_profi_count(mask, vdec->run_count);
3318 update_profi_clk_run(vdec, mask, get_current_clk());
3319 #endif
3320 vdec->run(vdec, mask, vdec_callback, core);
3321
3322
3323 /* we have some cores scheduled, keep working until
3324 * all vdecs are checked with no cores to schedule
3325 */
3326 if (core->parallel_dec == 1) {
3327 if (vdec_core->vdec_combine_flag == 0)
3328 up(&core->sem);
3329 } else
3330 up(&core->sem);
3331 }
3332
3333 /* remove disconnected decoder from active list */
3334 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
3335 list_del(&vdec->list);
3336 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
3337 /*core->last_vdec = NULL;*/
3338 complete(&vdec->inactive_done);
3339 }
3340
3341 /* if there is no new work scheduled and nothing
3342 * is running, sleep 20ms
3343 */
3344 if (core->parallel_dec == 1) {
3345 if (vdec_core->vdec_combine_flag == 0) {
3346 if ((!worker) &&
3347 ((core->sched_mask != core->power_ref_mask)) &&
3348 (atomic_read(&vdec_core->vdec_nr) > 0) &&
3349 ((core->buff_flag | core->stream_buff_flag) &
3350 (core->sched_mask ^ core->power_ref_mask))) {
3351 usleep_range(1000, 2000);
3352 up(&core->sem);
3353 }
3354 } else {
3355 if ((!worker) && (!core->sched_mask) &&
3356 (atomic_read(&vdec_core->vdec_nr) > 0) &&
3357 (core->buff_flag | core->stream_buff_flag)) {
3358 usleep_range(1000, 2000);
3359 up(&core->sem);
3360 }
3361 }
3362 } else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
3363 usleep_range(1000, 2000);
3364 up(&core->sem);
3365 }
3366
3367 }
3368
3369 return 0;
3370 }
3371
3372 #if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
/*
 * test_hevc() - probe the HEVC core datapath right after power-on.
 *
 * Used by the GXBB-era HEVC power-on workaround: programs a minimal
 * dummy decode (canvas tables pointing at a scratch buffer, mcrcc and
 * decomp init, one frame/CTB/PU command in SW-IMP mode), waits
 * @us_delay microseconds for the pipeline to settle, then samples an
 * IPP debug status word.
 *
 * @decomp_addr: physical address of the scratch buffer used as the
 *               canvas / decompression target for the dummy decode.
 * @us_delay:    settle time in microseconds before reading the result.
 *
 * Return: true when debug status bits [1:0] read back as 1, i.e. the
 * core came up healthy; false means the power sequence must be retried.
 *
 * NOTE: register write order is part of the hardware contract here —
 * do not reorder.
 */
static bool test_hevc(u32 decomp_addr, u32 us_delay)
{
	int i;

	/* SW_RESET IPP */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);

	/* initialize all canvas table */
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
			0x1 | (i << 8) | decomp_addr);
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);

	/* Initialize mcrcc */
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
	WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
	WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);

	/* Decomp initialize */
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);

	/* Frame level initialization */
	WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
	WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
	WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
	WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));

	WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
	WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);

	/* Enable SWIMP mode */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);

	/* Enable frame */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
	WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);

	/* Send SW-command CTB info */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);

	/* Send PU_command */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);

	/* let the dummy decode settle before sampling the debug port */
	udelay(us_delay);

	WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);

	/* bits [1:0] == 1 means the pipeline executed the command */
	return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
}
3432
/*
 * vdec_power_reset() - force every decoder core into the off state.
 *
 * Unconditionally (no reference counting) runs the power-down sequence
 * for VDEC1 and, where present, VDEC2 / HCODEC / HEVC:
 * enable isolation -> power off core memories -> assert the AO sleep
 * bits.  Intended as a hard reset of the whole DOS power state, e.g.
 * during error recovery; regular on/off paths use
 * vdec_poweron()/vdec_poweroff() instead.
 */
void vdec_power_reset(void)
{
	/* enable vdec1 isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
	/* power off vdec1 memories */
	WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
	/* vdec1 power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);

	if (has_vdec2()) {
		/* enable vdec2 isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
		/* power off vdec2 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
		/* vdec2 power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
	}

	if (has_hdec()) {
		/* enable hcodec isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
		/* power off hcodec memories */
		WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
		/* hcodec power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
	}

	if (has_hevc_vdec()) {
		/* enable hevc isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
		/* power off hevc memories */
		WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
		/* hevc power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
	}
}
EXPORT_SYMBOL(vdec_power_reset);
3478
vdec_poweron(enum vdec_type_e core)3479 void vdec_poweron(enum vdec_type_e core)
3480 {
3481 void *decomp_addr = NULL;
3482 dma_addr_t decomp_dma_addr;
3483 u32 decomp_addr_aligned = 0;
3484 int hevc_loop = 0;
3485 int sleep_val, iso_val;
3486 bool is_power_ctrl_ver2 = false;
3487
3488 if (core >= VDEC_MAX)
3489 return;
3490
3491 mutex_lock(&vdec_mutex);
3492
3493 vdec_core->power_ref_count[core]++;
3494 if (vdec_core->power_ref_count[core] > 1) {
3495 mutex_unlock(&vdec_mutex);
3496 return;
3497 }
3498
3499 if (vdec_on(core)) {
3500 mutex_unlock(&vdec_mutex);
3501 return;
3502 }
3503
3504 is_power_ctrl_ver2 =
3505 ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3506 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;
3507
3508 if (hevc_workaround_needed() &&
3509 (core == VDEC_HEVC)) {
3510 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
3511 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
3512
3513 if (decomp_addr) {
3514 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
3515 memset((u8 *)decomp_addr +
3516 (decomp_addr_aligned - decomp_dma_addr),
3517 0xff, SZ_4K);
3518 } else
3519 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
3520 }
3521
3522 if (core == VDEC_1) {
3523 sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
3524 iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;
3525
3526 /* vdec1 power on */
3527 #ifdef CONFIG_AMLOGIC_POWER
3528 if (is_support_power_ctrl()) {
3529 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3530 mutex_unlock(&vdec_mutex);
3531 pr_err("vdec-1 power on ctrl sleep fail.\n");
3532 return;
3533 }
3534 } else {
3535 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3536 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3537 }
3538 #else
3539 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3540 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3541 #endif
3542 /* wait 10uS */
3543 udelay(10);
3544 /* vdec1 soft reset */
3545 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
3546 WRITE_VREG(DOS_SW_RESET0, 0);
3547 /* enable vdec1 clock */
3548 /*
3549 *add power on vdec clock level setting,only for m8 chip,
3550 * m8baby and m8m2 can dynamic adjust vdec clock,
3551 * power on with default clock level
3552 */
3553 amports_switch_gate("clk_vdec_mux", 1);
3554 vdec_clock_hi_enable();
3555 /* power up vdec memories */
3556 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
3557
3558 /* remove vdec1 isolation */
3559 #ifdef CONFIG_AMLOGIC_POWER
3560 if (is_support_power_ctrl()) {
3561 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3562 mutex_unlock(&vdec_mutex);
3563 pr_err("vdec-1 power on ctrl iso fail.\n");
3564 return;
3565 }
3566 } else {
3567 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3568 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3569 }
3570 #else
3571 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3572 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3573 #endif
3574 /* reset DOS top registers */
3575 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
3576 } else if (core == VDEC_2) {
3577 if (has_vdec2()) {
3578 /* vdec2 power on */
3579 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3580 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3581 ~0x30);
3582 /* wait 10uS */
3583 udelay(10);
3584 /* vdec2 soft reset */
3585 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
3586 WRITE_VREG(DOS_SW_RESET2, 0);
3587 /* enable vdec1 clock */
3588 vdec2_clock_hi_enable();
3589 /* power up vdec memories */
3590 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
3591 /* remove vdec2 isolation */
3592 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3593 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
3594 ~0x300);
3595 /* reset DOS top registers */
3596 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
3597 }
3598 } else if (core == VDEC_HCODEC) {
3599 if (has_hdec()) {
3600 sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
3601 iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;
3602
3603 /* hcodec power on */
3604 #ifdef CONFIG_AMLOGIC_POWER
3605 if (is_support_power_ctrl()) {
3606 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3607 mutex_unlock(&vdec_mutex);
3608 pr_err("hcodec power on ctrl sleep fail.\n");
3609 return;
3610 }
3611 } else {
3612 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3613 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3614 }
3615 #else
3616 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3617 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3618 #endif
3619 /* wait 10uS */
3620 udelay(10);
3621 /* hcodec soft reset */
3622 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
3623 WRITE_VREG(DOS_SW_RESET1, 0);
3624 /* enable hcodec clock */
3625 hcodec_clock_enable();
3626 /* power up hcodec memories */
3627 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
3628 /* remove hcodec isolation */
3629 #ifdef CONFIG_AMLOGIC_POWER
3630 if (is_support_power_ctrl()) {
3631 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3632 mutex_unlock(&vdec_mutex);
3633 pr_err("hcodec power on ctrl iso fail.\n");
3634 return;
3635 }
3636 } else {
3637 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3638 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3639 }
3640 #else
3641 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3642 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3643 #endif
3644 }
3645 } else if (core == VDEC_HEVC) {
3646 if (has_hevc_vdec()) {
3647 bool hevc_fixed = false;
3648
3649 sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
3650 iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;
3651
3652 while (!hevc_fixed) {
3653 /* hevc power on */
3654 #ifdef CONFIG_AMLOGIC_POWER
3655 if (is_support_power_ctrl()) {
3656 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3657 mutex_unlock(&vdec_mutex);
3658 pr_err("hevc power on ctrl sleep fail.\n");
3659 return;
3660 }
3661 } else {
3662 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3663 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3664 }
3665 #else
3666 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3667 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3668 #endif
3669 /* wait 10uS */
3670 udelay(10);
3671 /* hevc soft reset */
3672 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
3673 WRITE_VREG(DOS_SW_RESET3, 0);
3674 /* enable hevc clock */
3675 amports_switch_gate("clk_hevc_mux", 1);
3676 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
3677 amports_switch_gate("clk_hevcb_mux", 1);
3678 hevc_clock_hi_enable();
3679 hevc_back_clock_hi_enable();
3680 /* power up hevc memories */
3681 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
3682 /* remove hevc isolation */
3683 #ifdef CONFIG_AMLOGIC_POWER
3684 if (is_support_power_ctrl()) {
3685 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3686 mutex_unlock(&vdec_mutex);
3687 pr_err("hevc power on ctrl iso fail.\n");
3688 return;
3689 }
3690 } else {
3691 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3692 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3693 }
3694 #else
3695 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3696 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3697 #endif
3698 if (!hevc_workaround_needed())
3699 break;
3700
3701 if (decomp_addr)
3702 hevc_fixed = test_hevc(
3703 decomp_addr_aligned, 20);
3704
3705 if (!hevc_fixed) {
3706 hevc_loop++;
3707
3708 mutex_unlock(&vdec_mutex);
3709
3710 if (hevc_loop >= HEVC_TEST_LIMIT) {
3711 pr_warn("hevc power sequence over limit\n");
3712 pr_warn("=====================================================\n");
3713 pr_warn(" This chip is identified to have HW failure.\n");
3714 pr_warn(" Please contact sqa-platform to replace the platform.\n");
3715 pr_warn("=====================================================\n");
3716
3717 panic("Force panic for chip detection !!!\n");
3718
3719 break;
3720 }
3721
3722 vdec_poweroff(VDEC_HEVC);
3723
3724 mdelay(10);
3725
3726 mutex_lock(&vdec_mutex);
3727 }
3728 }
3729
3730 if (hevc_loop > hevc_max_reset_count)
3731 hevc_max_reset_count = hevc_loop;
3732
3733 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
3734 udelay(10);
3735 WRITE_VREG(DOS_SW_RESET3, 0);
3736 }
3737 }
3738
3739 if (decomp_addr)
3740 codec_mm_dma_free_coherent(MEM_NAME,
3741 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
3742
3743 mutex_unlock(&vdec_mutex);
3744 }
3745 EXPORT_SYMBOL(vdec_poweron);
3746
/*
 * vdec_poweroff() - drop one reference on a decoder core and power it
 * down when the count reaches zero.
 *
 * Mirror of vdec_poweron(): enable isolation -> power off core
 * memories -> disable clocks -> assert AO sleep bits, all under
 * vdec_mutex.  The HEVC core additionally honors the global
 * 'no_powerdown' flag, in which case it is only soft-reset and left
 * powered.
 *
 * @core: which decoder core to release; values >= VDEC_MAX are ignored.
 *
 * NOTE(review): when a power_ctrl_* call fails, the function returns
 * with the ref count already decremented and the sequence only partly
 * executed — matches the power-on side, but verify this is intended.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	int sleep_val, iso_val;
	bool is_power_ctrl_ver2 = false;

	if (core >= VDEC_MAX)
		return;

	mutex_lock(&vdec_mutex);

	vdec_core->power_ref_count[core]--;
	if (vdec_core->power_ref_count[core] > 0) {
		/* other users still hold the core */
		mutex_unlock(&vdec_mutex);
		return;
	}

	/* SM1+ (except TL1) use the compact per-core power bit layout */
	is_power_ctrl_ver2 =
		((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;

	if (core == VDEC_1) {
		sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
		iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;

		/* enable vdec1 isolation */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_iso_mask(false, iso_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl iso fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
		/* power off vdec1 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
		/* disable vdec1 clock */
		vdec_clock_off();
		/* vdec1 power off */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl sleep fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
	} else if (core == VDEC_2) {
		if (has_vdec2()) {
			/* enable vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
					0x300);
			/* power off vdec2 memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
			/* disable vdec2 clock */
			vdec2_clock_off();
			/* vdec2 power off */
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
					0x30);
		}
	} else if (core == VDEC_HCODEC) {
		if (has_hdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* enable hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(false, iso_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
			/* power off hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
			/* disable hcodec clock */
			hcodec_clock_off();
			/* hcodec power off */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
		}
	} else if (core == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;

			if (no_powerdown == 0) {
				/* enable hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(false, iso_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
				/* power off hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);

				/* disable hevc clock */
				hevc_clock_off();
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					hevc_back_clock_off();

				/* hevc power off */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
			} else {
				/* one-shot: keep HEVC powered, just reset it */
				pr_info("!!!!!!!!not power down\n");
				hevc_reset_core(NULL);
				no_powerdown = 0;
			}
		}
	}
	mutex_unlock(&vdec_mutex);
}
EXPORT_SYMBOL(vdec_poweroff);
3920
vdec_on(enum vdec_type_e core)3921 bool vdec_on(enum vdec_type_e core)
3922 {
3923 bool ret = false;
3924
3925 if (core == VDEC_1) {
3926 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3927 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3928 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3929 ? 0x2 : 0xc)) == 0) &&
3930 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
3931 ret = true;
3932 } else if (core == VDEC_2) {
3933 if (has_vdec2()) {
3934 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
3935 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
3936 ret = true;
3937 }
3938 } else if (core == VDEC_HCODEC) {
3939 if (has_hdec()) {
3940 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3941 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3942 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3943 ? 0x1 : 0x3)) == 0) &&
3944 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
3945 ret = true;
3946 }
3947 } else if (core == VDEC_HEVC) {
3948 if (has_hevc_vdec()) {
3949 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3950 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3951 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3952 ? 0x4 : 0xc0)) == 0) &&
3953 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
3954 ret = true;
3955 }
3956 }
3957
3958 return ret;
3959 }
3960 EXPORT_SYMBOL(vdec_on);
3961
3962 #elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
/*
 * vdec_poweron() - legacy (MESON6TVD-era) core power-up.
 *
 * NOTE: this entire branch is compiled out by the '#elif 0' above and
 * is kept for reference only.  Older chips have no AO power domains,
 * so "power on" is just soft reset + clock enable under the spinlock.
 */
void vdec_poweron(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* vdec1 soft reset */
		WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
		WRITE_VREG(DOS_SW_RESET0, 0);
		/* enable vdec1 clock */
		vdec_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_2) {
		/* vdec2 soft reset */
		WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET2, 0);
		/* enable vdec2 clock */
		vdec2_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_HCODEC) {
		/* hcodec soft reset */
		WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET1, 0);
		/* enable hcodec clock */
		hcodec_clock_enable();
	}

	spin_unlock_irqrestore(&lock, flags);
}
3995
/*
 * vdec_poweroff() - legacy (MESON6TVD-era) core power-down.
 *
 * NOTE: compiled out by the '#elif 0' above; reference only.  On these
 * chips power-down is simply gating the core clock.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* disable vdec1 clock */
		vdec_clock_off();
	} else if (core == VDEC_2) {
		/* disable vdec2 clock */
		vdec2_clock_off();
	} else if (core == VDEC_HCODEC) {
		/* disable hcodec clock */
		hcodec_clock_off();
	}

	spin_unlock_irqrestore(&lock, flags);
}
4015
/*
 * vdec_on() - legacy (MESON6TVD-era) power query.
 *
 * NOTE: compiled out by the '#elif 0' above; reference only.  A core
 * counts as "on" when its HHI clock-gate bit is set.
 */
bool vdec_on(enum vdec_type_e core)
{
	bool ret = false;

	if (core == VDEC_1) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_2) {
		if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_HCODEC) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
			ret = true;
	}

	return ret;
}
4033 #endif
4034
vdec_source_changed(int format,int width,int height,int fps)4035 int vdec_source_changed(int format, int width, int height, int fps)
4036 {
4037 /* todo: add level routines for clock adjustment per chips */
4038 int ret = -1;
4039 static int on_setting;
4040
4041 if (on_setting > 0)
4042 return ret;/*on changing clk,ignore this change*/
4043
4044 if (vdec_source_get(VDEC_1) == width * height * fps)
4045 return ret;
4046
4047
4048 on_setting = 1;
4049 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
4050 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
4051 width, height, fps, vdec_clk_get(VDEC_1));
4052 on_setting = 0;
4053 return ret;
4054
4055 }
4056 EXPORT_SYMBOL(vdec_source_changed);
4057
/*
 * vdec_reset_core() - soft-reset the VDEC core with its DMC traffic
 * quiesced.
 *
 * Sequence: clear the VDEC request bit in DMC_REQ_CTRL, busy-wait for
 * the channel status to report idle, pulse the DOS_SW_RESET0 bits, then
 * restore the DMC request bit.  The DMC interface bit moved from bit13
 * to bit21 on G12A+ chips, and TM2 rev.B reads channel status from a
 * different register.
 *
 * @vdec: currently unused; kept for interface symmetry with
 *        hevc_reset_core().
 *
 * NOTE(review): both busy-wait loops have no timeout — a stuck DMC
 * channel would hang here; confirm this is acceptable in all callers'
 * contexts.
 */
void vdec_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 13; /*bit13: DOS VDEC interface*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask = 1 << 21; /*bit21: DOS VDEC interface*/

	/* stop accepting new VDEC requests on the DMC bus */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait until in-flight VDEC traffic drains */
	if (is_cpu_tm2_revb()) {
		while (!(codec_dmcbus_read(TM2_REVB_DMC_CHAN_STS)
				& mask))
			;
	} else {
		while (!(codec_dmcbus_read(DMC_CHAN_STS)
				& mask))
			;
	}
	/*
	 * 2: assist
	 * 3: vld_reset
	 * 4: vld_part_reset
	 * 5: vfifo reset
	 * 6: iqidct
	 * 7: mc
	 * 8: dblk
	 * 9: pic_dc
	 * 10: psc
	 * 11: mcpu
	 * 12: ccpu
	 * 13: ddr
	 * 14: afifo
	 */
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) {
		/* newer chips additionally reset mc/dblk/pic_dc */
		WRITE_VREG(DOS_SW_RESET0, (1<<3)|(1<<4)|(1<<5)|(1<<7)|(1<<8)|(1<<9));
	} else {
		WRITE_VREG(DOS_SW_RESET0,
			(1<<3)|(1<<4)|(1<<5));
	}
	WRITE_VREG(DOS_SW_RESET0, 0);

	/* re-enable VDEC requests on the DMC bus */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
}
EXPORT_SYMBOL(vdec_reset_core);
4111
/*
 * hevc_mmu_dma_check() - drain the HEVC SAO MMU DMA before reset/stop.
 *
 * G12A+ only (earlier chips have no SAO MMU DMA).  Polls (up to ~10ms
 * each, via jiffies) for the CM core to go idle, disables the SAO MMU
 * DMA enable bit, then polls for the DMA status to report done.
 *
 * @vdec: unused; present for interface symmetry with the other
 *        per-core helpers.
 */
void hevc_mmu_dma_check(struct vdec_s *vdec)
{
	ulong timeout;
	u32 data;
	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A)
		return;
	/* ~10ms budget for the CM core to go idle */
	timeout = jiffies + HZ/100;
	while (1) {
		data = READ_VREG(HEVC_CM_CORE_STATUS);
		if ((data & 0x1) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			/* NOTE(review): message says "idle" but this is the
			 * timeout path — possibly a misleading log text.
			 */
			if (debug & 0x10)
				pr_info(" %s sao mmu dma idle\n", __func__);
			break;
		}
	}
	/*disable sao mmu dma */
	CLEAR_VREG_MASK(HEVC_SAO_MMU_DMA_CTRL, 1 << 0);
	/* ~10ms budget for the DMA to report done */
	timeout = jiffies + HZ/100;
	while (1) {
		data = READ_VREG(HEVC_SAO_MMU_DMA_STATUS);
		if ((data & 0x1))
			break;
		if (time_after(jiffies, timeout)) {
			if (debug & 0x10)
				pr_err("%s sao mmu dma timeout, num_buf_used = 0x%x\n",
					__func__, (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16));
			break;
		}
	}
}
EXPORT_SYMBOL(hevc_mmu_dma_check);
4145
/*
 * hevc_reset_core() - soft-reset the HEVC core with its DMC traffic
 * quiesced.
 *
 * Sequence: stop the stream fetcher, clear the HEVC request bit(s) in
 * DMC_REQ_CTRL (bit4 hevc, plus bit8 hevcb on G12A+), busy-wait for
 * the channel status to drain, pulse DOS_SW_RESET3, then restore the
 * DMC request bits.
 *
 * @vdec: may be NULL (e.g. from vdec_poweroff()'s no_powerdown path);
 *        when NULL or frame-based input, the stream control register is
 *        cleared a second time after the drain.
 *
 * NOTE(review): the drain busy-waits have no timeout, same caveat as
 * vdec_reset_core().
 */
void hevc_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /*bit4: hevc*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= 1 << 8; /*bit8: hevcb*/

	/* halt the stream fetcher before cutting DMC access */
	WRITE_VREG(HEVC_STREAM_CONTROL, 0);
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait until in-flight HEVC traffic drains */
	if (is_cpu_tm2_revb()) {
		while (!(codec_dmcbus_read(TM2_REVB_DMC_CHAN_STS)
			& mask))
			;
	} else {
		while (!(codec_dmcbus_read(DMC_CHAN_STS)
			& mask))
			;
	}

	if (vdec == NULL || input_frame_based(vdec))
		WRITE_VREG(HEVC_STREAM_CONTROL, 0);

		/*
	 * 2: assist
	 * 3: parser
	 * 4: parser_state
	 * 8: dblk
	 * 11:mcpu
	 * 12:ccpu
	 * 13:ddr
	 * 14:iqit
	 * 15:ipp
	 * 17:qdct
	 * 18:mpred
	 * 19:sao
	 * 24:hevc_afifo
	 */
	WRITE_VREG(DOS_SW_RESET3,
		(1<<3)|(1<<4)|(1<<8)|(1<<11)|
		(1<<12)|(1<<13)|(1<<14)|(1<<15)|
		(1<<17)|(1<<18)|(1<<19)|(1<<24));

	WRITE_VREG(DOS_SW_RESET3, 0);

	/* re-enable HEVC requests on the DMC bus */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

}
EXPORT_SYMBOL(hevc_reset_core);
4204
vdec2_source_changed(int format,int width,int height,int fps)4205 int vdec2_source_changed(int format, int width, int height, int fps)
4206 {
4207 int ret = -1;
4208 static int on_setting;
4209
4210 if (has_vdec2()) {
4211 /* todo: add level routines for clock adjustment per chips */
4212 if (on_setting != 0)
4213 return ret;/*on changing clk,ignore this change*/
4214
4215 if (vdec_source_get(VDEC_2) == width * height * fps)
4216 return ret;
4217
4218 on_setting = 1;
4219 ret = vdec_source_changed_for_clk_set(format,
4220 width, height, fps);
4221 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
4222 width, height, fps, vdec_clk_get(VDEC_2));
4223 on_setting = 0;
4224 return ret;
4225 }
4226 return 0;
4227 }
4228 EXPORT_SYMBOL(vdec2_source_changed);
4229
hevc_source_changed(int format,int width,int height,int fps)4230 int hevc_source_changed(int format, int width, int height, int fps)
4231 {
4232 /* todo: add level routines for clock adjustment per chips */
4233 int ret = -1;
4234 static int on_setting;
4235
4236 if (on_setting != 0)
4237 return ret;/*on changing clk,ignore this change*/
4238
4239 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
4240 return ret;
4241
4242 on_setting = 1;
4243 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
4244 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
4245 width, height, fps, vdec_clk_get(VDEC_HEVC));
4246 on_setting = 0;
4247
4248 return ret;
4249 }
4250 EXPORT_SYMBOL(hevc_source_changed);
4251
/*
 * AMRISC register name/offset table dumped by amrisc_regs_show():
 * master CPU (M*) and coprocessor (C*) control registers, followed by
 * the AV_SCRATCH mailbox registers.
 */
static struct am_reg am_risc[] = {
	{"MSP", 0x300},
	{"MPSR", 0x301},
	{"MCPU_INT_BASE", 0x302},
	{"MCPU_INTR_GRP", 0x303},
	{"MCPU_INTR_MSK", 0x304},
	{"MCPU_INTR_REQ", 0x305},
	{"MPC-P", 0x306},
	{"MPC-D", 0x307},
	{"MPC_E", 0x308},
	{"MPC_W", 0x309},
	{"CSP", 0x320},
	{"CPSR", 0x321},
	{"CCPU_INT_BASE", 0x322},
	{"CCPU_INTR_GRP", 0x323},
	{"CCPU_INTR_MSK", 0x324},
	{"CCPU_INTR_REQ", 0x325},
	{"CPC-P", 0x326},
	{"CPC-D", 0x327},
	{"CPC_E", 0x328},
	{"CPC_W", 0x329},
	{"AV_SCRATCH_0", 0x09c0},
	{"AV_SCRATCH_1", 0x09c1},
	{"AV_SCRATCH_2", 0x09c2},
	{"AV_SCRATCH_3", 0x09c3},
	{"AV_SCRATCH_4", 0x09c4},
	{"AV_SCRATCH_5", 0x09c5},
	{"AV_SCRATCH_6", 0x09c6},
	{"AV_SCRATCH_7", 0x09c7},
	{"AV_SCRATCH_8", 0x09c8},
	{"AV_SCRATCH_9", 0x09c9},
	{"AV_SCRATCH_A", 0x09ca},
	{"AV_SCRATCH_B", 0x09cb},
	{"AV_SCRATCH_C", 0x09cc},
	{"AV_SCRATCH_D", 0x09cd},
	{"AV_SCRATCH_E", 0x09ce},
	{"AV_SCRATCH_F", 0x09cf},
	{"AV_SCRATCH_G", 0x09d0},
	{"AV_SCRATCH_H", 0x09d1},
	{"AV_SCRATCH_I", 0x09d2},
	{"AV_SCRATCH_J", 0x09d3},
	{"AV_SCRATCH_K", 0x09d4},
	{"AV_SCRATCH_L", 0x09d5},
	{"AV_SCRATCH_M", 0x09d6},
	{"AV_SCRATCH_N", 0x09d7},
};
4298
/*
 * amrisc_regs_show() - sysfs: dump the am_risc[] register table.
 *
 * On M8+ the VDEC_1 core must be powered (checked under vdec_mutex,
 * held across the reads); on M6-class chips the vdec gate is toggled
 * around the reads instead.  Each register is printed as
 * "name(offset): hex(dec)".
 *
 * Return: number of bytes written into @buf.
 */
static ssize_t amrisc_regs_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct am_reg *regs = am_risc;
	int rsize = sizeof(am_risc) / sizeof(struct am_reg);
	int i;
	unsigned int val;
	ssize_t ret;

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
		mutex_lock(&vdec_mutex);
		if (!vdec_on(VDEC_1)) {
			/* reading VREGs with the core off would be invalid */
			mutex_unlock(&vdec_mutex);
			pbuf += sprintf(pbuf, "amrisc is power off\n");
			ret = pbuf - buf;
			return ret;
		}
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 1);
		 */
		amports_switch_gate("vdec", 1);
	}
	pbuf += sprintf(pbuf, "amrisc registers show:\n");
	for (i = 0; i < rsize; i++) {
		val = READ_VREG(regs[i].offset);
		pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
				regs[i].name, regs[i].offset, val, val);
	}
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
		mutex_unlock(&vdec_mutex);
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 0);
		 */
		amports_switch_gate("vdec", 0);
	}
	ret = pbuf - buf;
	return ret;
}
4342
dump_trace_show(struct class * class,struct class_attribute * attr,char * buf)4343 static ssize_t dump_trace_show(struct class *class,
4344 struct class_attribute *attr, char *buf)
4345 {
4346 int i;
4347 char *pbuf = buf;
4348 ssize_t ret;
4349 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
4350
4351 if (!trace_buf) {
4352 pbuf += sprintf(pbuf, "No Memory bug\n");
4353 ret = pbuf - buf;
4354 return ret;
4355 }
4356 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
4357 mutex_lock(&vdec_mutex);
4358 if (!vdec_on(VDEC_1)) {
4359 mutex_unlock(&vdec_mutex);
4360 kfree(trace_buf);
4361 pbuf += sprintf(pbuf, "amrisc is power off\n");
4362 ret = pbuf - buf;
4363 return ret;
4364 }
4365 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4366 /*TODO:M6 define */
4367 /*
4368 * switch_mod_gate_by_type(MOD_VDEC, 1);
4369 */
4370 amports_switch_gate("vdec", 1);
4371 }
4372 pr_info("dump trace steps:%d start\n", debug_trace_num);
4373 i = 0;
4374 while (i <= debug_trace_num - 16) {
4375 trace_buf[i] = READ_VREG(MPC_E);
4376 trace_buf[i + 1] = READ_VREG(MPC_E);
4377 trace_buf[i + 2] = READ_VREG(MPC_E);
4378 trace_buf[i + 3] = READ_VREG(MPC_E);
4379 trace_buf[i + 4] = READ_VREG(MPC_E);
4380 trace_buf[i + 5] = READ_VREG(MPC_E);
4381 trace_buf[i + 6] = READ_VREG(MPC_E);
4382 trace_buf[i + 7] = READ_VREG(MPC_E);
4383 trace_buf[i + 8] = READ_VREG(MPC_E);
4384 trace_buf[i + 9] = READ_VREG(MPC_E);
4385 trace_buf[i + 10] = READ_VREG(MPC_E);
4386 trace_buf[i + 11] = READ_VREG(MPC_E);
4387 trace_buf[i + 12] = READ_VREG(MPC_E);
4388 trace_buf[i + 13] = READ_VREG(MPC_E);
4389 trace_buf[i + 14] = READ_VREG(MPC_E);
4390 trace_buf[i + 15] = READ_VREG(MPC_E);
4391 i += 16;
4392 };
4393 pr_info("dump trace steps:%d finished\n", debug_trace_num);
4394 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
4395 mutex_unlock(&vdec_mutex);
4396 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4397 /*TODO:M6 define */
4398 /*
4399 * switch_mod_gate_by_type(MOD_VDEC, 0);
4400 */
4401 amports_switch_gate("vdec", 0);
4402 }
4403 for (i = 0; i < debug_trace_num; i++) {
4404 if (i % 4 == 0) {
4405 if (i % 16 == 0)
4406 pbuf += sprintf(pbuf, "\n");
4407 else if (i % 8 == 0)
4408 pbuf += sprintf(pbuf, " ");
4409 else /* 4 */
4410 pbuf += sprintf(pbuf, " ");
4411 }
4412 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
4413 }
4414 while (i < debug_trace_num)
4415 ;
4416 kfree(trace_buf);
4417 pbuf += sprintf(pbuf, "\n");
4418 ret = pbuf - buf;
4419 return ret;
4420 }
4421
clock_level_show(struct class * class,struct class_attribute * attr,char * buf)4422 static ssize_t clock_level_show(struct class *class,
4423 struct class_attribute *attr, char *buf)
4424 {
4425 char *pbuf = buf;
4426 size_t ret;
4427
4428 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
4429
4430 if (has_vdec2())
4431 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
4432
4433 if (has_hevc_vdec())
4434 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
4435
4436 ret = pbuf - buf;
4437 return ret;
4438 }
4439
/* sysfs "enable_mvdec_info" reader: report the current flag value. */
static ssize_t enable_mvdec_info_show(struct class *cla,
		struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", enable_mvdec_info);
}
4445
enable_mvdec_info_store(struct class * cla,struct class_attribute * attr,const char * buf,size_t count)4446 static ssize_t enable_mvdec_info_store(struct class *cla,
4447 struct class_attribute *attr,
4448 const char *buf, size_t count)
4449 {
4450 int r;
4451 int val;
4452
4453 r = kstrtoint(buf, 0, &val);
4454 if (r < 0)
4455 return -EINVAL;
4456 enable_mvdec_info = val;
4457
4458 return count;
4459 }
4460
poweron_clock_level_store(struct class * class,struct class_attribute * attr,const char * buf,size_t size)4461 static ssize_t poweron_clock_level_store(struct class *class,
4462 struct class_attribute *attr,
4463 const char *buf, size_t size)
4464 {
4465 unsigned int val;
4466 ssize_t ret;
4467
4468 /*ret = sscanf(buf, "%d", &val);*/
4469 ret = kstrtoint(buf, 0, &val);
4470
4471 if (ret != 0)
4472 return -EINVAL;
4473 poweron_clock_level = val;
4474 return size;
4475 }
4476
/* sysfs "poweron_clock_level" reader: report the configured level. */
static ssize_t poweron_clock_level_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", poweron_clock_level);
}
4482
4483 /*
4484 *if keep_vdec_mem == 1
4485 *always don't release
4486 *vdec 64 memory for fast play.
4487 */
keep_vdec_mem_store(struct class * class,struct class_attribute * attr,const char * buf,size_t size)4488 static ssize_t keep_vdec_mem_store(struct class *class,
4489 struct class_attribute *attr,
4490 const char *buf, size_t size)
4491 {
4492 unsigned int val;
4493 ssize_t ret;
4494
4495 /*ret = sscanf(buf, "%d", &val);*/
4496 ret = kstrtoint(buf, 0, &val);
4497 if (ret != 0)
4498 return -EINVAL;
4499 keep_vdec_mem = val;
4500 return size;
4501 }
4502
/* sysfs "keep_vdec_mem" reader: report the current keep-memory flag. */
static ssize_t keep_vdec_mem_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", keep_vdec_mem);
}
4508
4509
4510 #ifdef VDEC_DEBUG_SUPPORT
debug_store(struct class * class,struct class_attribute * attr,const char * buf,size_t size)4511 static ssize_t debug_store(struct class *class,
4512 struct class_attribute *attr,
4513 const char *buf, size_t size)
4514 {
4515 struct vdec_s *vdec;
4516 struct vdec_core_s *core = vdec_core;
4517 unsigned long flags;
4518
4519 unsigned id;
4520 unsigned val;
4521 ssize_t ret;
4522 char cbuf[32];
4523
4524 cbuf[0] = 0;
4525 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
4526 /*pr_info(
4527 "%s(%s)=>ret %ld: %s, %x, %x\n",
4528 __func__, buf, ret, cbuf, id, val);*/
4529 if (strcmp(cbuf, "schedule") == 0) {
4530 pr_info("VDEC_DEBUG: force schedule\n");
4531 up(&core->sem);
4532 } else if (strcmp(cbuf, "power_off") == 0) {
4533 pr_info("VDEC_DEBUG: power off core %d\n", id);
4534 vdec_poweroff(id);
4535 } else if (strcmp(cbuf, "power_on") == 0) {
4536 pr_info("VDEC_DEBUG: power_on core %d\n", id);
4537 vdec_poweron(id);
4538 } else if (strcmp(cbuf, "wr") == 0) {
4539 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
4540 id, val);
4541 WRITE_VREG(id, val);
4542 } else if (strcmp(cbuf, "rd") == 0) {
4543 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
4544 id, READ_VREG(id));
4545 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
4546 pr_info(
4547 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
4548 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
4549 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
4550 }
4551
4552 flags = vdec_core_lock(vdec_core);
4553
4554 list_for_each_entry(vdec,
4555 &core->connected_vdec_list, list) {
4556 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
4557 if (((vdec->status == VDEC_STATUS_CONNECTED
4558 || vdec->status == VDEC_STATUS_ACTIVE)) &&
4559 (vdec->id == id)) {
4560 /*to add*/
4561 break;
4562 }
4563 }
4564 vdec_core_unlock(vdec_core, flags);
4565 return size;
4566 }
4567
debug_show(struct class * class,struct class_attribute * attr,char * buf)4568 static ssize_t debug_show(struct class *class,
4569 struct class_attribute *attr, char *buf)
4570 {
4571 char *pbuf = buf;
4572 struct vdec_s *vdec;
4573 struct vdec_core_s *core = vdec_core;
4574 unsigned long flags = vdec_core_lock(vdec_core);
4575 u64 tmp;
4576
4577 pbuf += sprintf(pbuf,
4578 "============== help:\n");
4579 pbuf += sprintf(pbuf,
4580 "'echo xxx > debug' usuage:\n");
4581 pbuf += sprintf(pbuf,
4582 "schedule - trigger schedule thread to run\n");
4583 pbuf += sprintf(pbuf,
4584 "power_off core_num - call vdec_poweroff(core_num)\n");
4585 pbuf += sprintf(pbuf,
4586 "power_on core_num - call vdec_poweron(core_num)\n");
4587 pbuf += sprintf(pbuf,
4588 "wr adr val - call WRITE_VREG(adr, val)\n");
4589 pbuf += sprintf(pbuf,
4590 "rd adr - call READ_VREG(adr)\n");
4591 pbuf += sprintf(pbuf,
4592 "read_hevc_clk_reg - read HHI register for hevc clk\n");
4593 pbuf += sprintf(pbuf,
4594 "===================\n");
4595
4596 pbuf += sprintf(pbuf,
4597 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
4598 list_for_each_entry(vdec,
4599 &core->connected_vdec_list, list) {
4600 enum vdec_type_e type;
4601 if ((vdec->status == VDEC_STATUS_CONNECTED
4602 || vdec->status == VDEC_STATUS_ACTIVE)) {
4603 for (type = VDEC_1; type < VDEC_MAX; type++) {
4604 if (vdec->core_mask & (1 << type)) {
4605 pbuf += sprintf(pbuf, "%s(%d):",
4606 vdec->vf_provider_name, type);
4607 pbuf += sprintf(pbuf, "\t%d",
4608 vdec->check_count[type]);
4609 pbuf += sprintf(pbuf, "\t%d",
4610 vdec->run_count[type]);
4611 pbuf += sprintf(pbuf, "\t%d",
4612 vdec->input_underrun_count[type]);
4613 pbuf += sprintf(pbuf, "\t%d",
4614 vdec->not_run_ready_count[type]);
4615 tmp = vdec->run_clk[type] * 100;
4616 do_div(tmp, vdec->total_clk[type]);
4617 pbuf += sprintf(pbuf,
4618 "\t%d%%\n",
4619 vdec->total_clk[type] == 0 ? 0 :
4620 (u32)tmp);
4621 }
4622 }
4623 }
4624 }
4625
4626 vdec_core_unlock(vdec_core, flags);
4627 return pbuf - buf;
4628
4629 }
4630 #endif
4631
stream_buffer_status_show(char * buf,int (* callback)(struct stream_buf_s *,char *))4632 int stream_buffer_status_show(char *buf,
4633 int (*callback) (struct stream_buf_s *, char *))
4634 {
4635 char *pbuf = buf;
4636 struct vdec_s *vdec;
4637 struct vdec_core_s *core = vdec_core;
4638 u64 flags = vdec_core_lock(vdec_core);
4639
4640 list_for_each_entry(vdec,
4641 &core->connected_vdec_list, list) {
4642 if ((vdec->status == VDEC_STATUS_CONNECTED
4643 || vdec->status == VDEC_STATUS_ACTIVE)) {
4644 if (vdec_frame_based(vdec))
4645 continue;
4646 pbuf += callback(&vdec->vbuf, pbuf);
4647 }
4648 }
4649 vdec_core_unlock(vdec_core, flags);
4650
4651 return pbuf - buf;
4652 }
4653 EXPORT_SYMBOL(stream_buffer_status_show);
4654
vfm_path_store(struct class * class,struct class_attribute * attr,const char * buf,size_t count)4655 static ssize_t vfm_path_store(struct class *class,
4656 struct class_attribute *attr,
4657 const char *buf, size_t count)
4658 {
4659 char *buf_dup, *ps, *token;
4660 char str[VDEC_MAP_NAME_SIZE] = "\0";
4661 bool found = false;
4662 int i;
4663
4664 if (strlen(buf) >= VDEC_MAP_NAME_SIZE) {
4665 pr_info("parameter is overflow\n");
4666 return -1;
4667 }
4668
4669 buf_dup = kstrdup(buf, GFP_KERNEL);
4670 ps = buf_dup;
4671 while (1) {
4672 token = strsep(&ps, "\n ");
4673 if (token == NULL)
4674 break;
4675 if (*token == '\0')
4676 continue;
4677
4678 for (i = 0; strcmp("reserved", vfm_path_node[i]) != 0; i++) {
4679 if (!strncmp (vfm_path_node[i], token, strlen(vfm_path_node[i]))) {
4680 break;
4681 }
4682 }
4683
4684 if (strcmp("reserved", vfm_path_node[i]) == 0 ||
4685 strncmp("help", buf, strlen("help")) == 0) {
4686 if (strncmp("help", buf, strlen("help")) != 0) {
4687 pr_info("warnning! Input parameter is invalid. set failed!\n");
4688 }
4689 pr_info("\nusage for example: \n");
4690 pr_info("echo help > /sys/class/vdec/vfm_path \n");
4691 pr_info("echo disable > /sys/class/vdec/vfm_path \n");
4692 pr_info("echo amlvideo ppmgr amvideo > /sys/class/vdec/vfm_path \n");
4693 found = false;
4694
4695 break;
4696 } else {
4697 strcat(str, vfm_path_node[i]);
4698 strcat(str, " ");
4699 found = true;
4700 }
4701 }
4702
4703 if (found == true) {
4704 memset(vfm_path, 0, sizeof(vfm_path));
4705 strncpy(vfm_path, str, strlen(str));
4706 vfm_path[VDEC_MAP_NAME_SIZE - 1] = '\0';
4707 pr_info("cfg path success: decoder %s\n", vfm_path);
4708 }
4709 kfree(buf_dup);
4710
4711 return count;
4712 }
4713
vfm_path_show(struct class * class,struct class_attribute * attr,char * buf)4714 static ssize_t vfm_path_show(struct class *class,
4715 struct class_attribute *attr, char *buf)
4716 {
4717 int len = 0;
4718 int i;
4719 len += sprintf(buf + len, "cfg vfm path: decoder %s\n", vfm_path);
4720 len += sprintf(buf + len, "\nvfm path node list: \n");
4721 for (i = 0; strcmp("reserved", vfm_path_node[i]) != 0; i++) {
4722 len += sprintf(buf + len, "\t%s \n", vfm_path_node[i]);
4723 }
4724
4725 return len;
4726 }
4727
4728 /*irq num as same as .dts*/
4729 /*
4730 * interrupts = <0 3 1
4731 * 0 23 1
4732 * 0 32 1
4733 * 0 43 1
4734 * 0 44 1
4735 * 0 45 1>;
4736 * interrupt-names = "vsync",
4737 * "demux",
4738 * "parser",
4739 * "mailbox_0",
4740 * "mailbox_1",
4741 * "mailbox_2";
4742 */
vdec_request_threaded_irq(enum vdec_irq_num num,irq_handler_t handler,irq_handler_t thread_fn,unsigned long irqflags,const char * devname,void * dev)4743 s32 vdec_request_threaded_irq(enum vdec_irq_num num,
4744 irq_handler_t handler,
4745 irq_handler_t thread_fn,
4746 unsigned long irqflags,
4747 const char *devname, void *dev)
4748 {
4749 s32 res_irq;
4750 s32 ret = 0;
4751
4752 if (num >= VDEC_IRQ_MAX) {
4753 pr_err("[%s] request irq error, irq num too big!", __func__);
4754 return -EINVAL;
4755 }
4756
4757 if (vdec_core->isr_context[num].irq < 0) {
4758 res_irq = platform_get_irq(
4759 vdec_core->vdec_core_platform_device, num);
4760 if (res_irq < 0) {
4761 pr_err("[%s] get irq error!", __func__);
4762 return -EINVAL;
4763 }
4764
4765 vdec_core->isr_context[num].irq = res_irq;
4766 vdec_core->isr_context[num].dev_isr = handler;
4767 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
4768 vdec_core->isr_context[num].dev_id = dev;
4769
4770 ret = request_threaded_irq(res_irq,
4771 vdec_isr,
4772 vdec_thread_isr,
4773 (thread_fn) ? IRQF_ONESHOT : irqflags,
4774 devname,
4775 &vdec_core->isr_context[num]);
4776
4777 if (ret) {
4778 vdec_core->isr_context[num].irq = -1;
4779 vdec_core->isr_context[num].dev_isr = NULL;
4780 vdec_core->isr_context[num].dev_threaded_isr = NULL;
4781 vdec_core->isr_context[num].dev_id = NULL;
4782
4783 pr_err("vdec irq register error for %s.\n", devname);
4784 return -EIO;
4785 }
4786 } else {
4787 vdec_core->isr_context[num].dev_isr = handler;
4788 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
4789 vdec_core->isr_context[num].dev_id = dev;
4790 }
4791
4792 return ret;
4793 }
4794 EXPORT_SYMBOL(vdec_request_threaded_irq);
4795
vdec_request_irq(enum vdec_irq_num num,irq_handler_t handler,const char * devname,void * dev)4796 s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
4797 const char *devname, void *dev)
4798 {
4799 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
4800
4801 return vdec_request_threaded_irq(num,
4802 handler,
4803 NULL,/*no thread_fn*/
4804 IRQF_SHARED,
4805 devname,
4806 dev);
4807 }
4808 EXPORT_SYMBOL(vdec_request_irq);
4809
/*
 * Detach the per-device hooks from a vdec interrupt slot. The platform
 * IRQ itself stays requested so a later vdec_request_threaded_irq() can
 * rebind the slot cheaply.
 */
void vdec_free_irq(enum vdec_irq_num num, void *dev)
{
	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return;
	}
	/*
	 *assume amrisc is stopped already and there is no mailbox interrupt
	 * when we reset pointers here.
	 */
	/* Clear hooks first, then wait for any in-flight handler to finish
	 * before returning, so the caller can safely tear down @dev. */
	vdec_core->isr_context[num].dev_isr = NULL;
	vdec_core->isr_context[num].dev_threaded_isr = NULL;
	vdec_core->isr_context[num].dev_id = NULL;
	synchronize_irq(vdec_core->isr_context[num].irq);
}
4825 EXPORT_SYMBOL(vdec_free_irq);
4826
vdec_get_default_vdec_for_userdata(void)4827 struct vdec_s *vdec_get_default_vdec_for_userdata(void)
4828 {
4829 struct vdec_s *vdec;
4830 struct vdec_s *ret_vdec;
4831 struct vdec_core_s *core = vdec_core;
4832 unsigned long flags;
4833 int id;
4834
4835 flags = vdec_core_lock(vdec_core);
4836
4837 id = 0x10000000;
4838 ret_vdec = NULL;
4839 if (!list_empty(&core->connected_vdec_list)) {
4840 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4841 if (vdec->id < id) {
4842 id = vdec->id;
4843 ret_vdec = vdec;
4844 }
4845 }
4846 }
4847
4848 vdec_core_unlock(vdec_core, flags);
4849
4850 return ret_vdec;
4851 }
4852 EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
4853
vdec_get_vdec_by_id(int vdec_id)4854 struct vdec_s *vdec_get_vdec_by_id(int vdec_id)
4855 {
4856 struct vdec_s *vdec;
4857 struct vdec_s *ret_vdec;
4858 struct vdec_core_s *core = vdec_core;
4859 unsigned long flags;
4860
4861 flags = vdec_core_lock(vdec_core);
4862
4863 ret_vdec = NULL;
4864 if (!list_empty(&core->connected_vdec_list)) {
4865 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4866 if (vdec->id == vdec_id) {
4867 ret_vdec = vdec;
4868 break;
4869 }
4870 }
4871 }
4872
4873 vdec_core_unlock(vdec_core, flags);
4874
4875 return ret_vdec;
4876 }
4877 EXPORT_SYMBOL(vdec_get_vdec_by_id);
4878
vdec_read_user_data(struct vdec_s * vdec,struct userdata_param_t * p_userdata_param)4879 int vdec_read_user_data(struct vdec_s *vdec,
4880 struct userdata_param_t *p_userdata_param)
4881 {
4882 int ret = 0;
4883
4884 if (!vdec)
4885 vdec = vdec_get_default_vdec_for_userdata();
4886
4887 if (vdec) {
4888 if (vdec->user_data_read)
4889 ret = vdec->user_data_read(vdec, p_userdata_param);
4890 }
4891 return ret;
4892 }
4893 EXPORT_SYMBOL(vdec_read_user_data);
4894
vdec_wakeup_userdata_poll(struct vdec_s * vdec)4895 int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
4896 {
4897 if (vdec) {
4898 if (vdec->wakeup_userdata_poll)
4899 vdec->wakeup_userdata_poll(vdec);
4900 }
4901
4902 return 0;
4903 }
4904 EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
4905
vdec_reset_userdata_fifo(struct vdec_s * vdec,int bInit)4906 void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
4907 {
4908 if (!vdec)
4909 vdec = vdec_get_default_vdec_for_userdata();
4910
4911 if (vdec) {
4912 if (vdec->reset_userdata_fifo)
4913 vdec->reset_userdata_fifo(vdec, bInit);
4914 }
4915 }
4916 EXPORT_SYMBOL(vdec_reset_userdata_fifo);
4917
4918 static int dump_mode;
dump_risc_mem_store(struct class * class,struct class_attribute * attr,const char * buf,size_t size)4919 static ssize_t dump_risc_mem_store(struct class *class,
4920 struct class_attribute *attr,
4921 const char *buf, size_t size)/*set*/
4922 {
4923 unsigned int val;
4924 ssize_t ret;
4925 char dump_mode_str[4] = "PRL";
4926
4927 /*ret = sscanf(buf, "%d", &val);*/
4928 ret = kstrtoint(buf, 0, &val);
4929
4930 if (ret != 0)
4931 return -EINVAL;
4932 dump_mode = val & 0x3;
4933 pr_info("set dump mode to %d,%c_mem\n",
4934 dump_mode, dump_mode_str[dump_mode]);
4935 return size;
4936 }
/*
 * Read one word of AMRISC memory through the indirect access window:
 * the address goes to register 0x31b, the data comes back via 0x31c.
 * The write/read order is required by the hardware.
 */
static u32 read_amrisc_reg(int reg)
{
	WRITE_VREG(0x31b, reg);
	return READ_VREG(0x31c);
}
4942
/*
 * Dump AMRISC program memory to the kernel log via the indirect window.
 * Registers 0x301/0x31d appear to configure the window and select bank 0
 * (pmem) — TODO confirm against the amrisc documentation.
 */
static void dump_pmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 0);
	pr_info("start dump amrisc pmem of risc\n");
	/* NOTE(review): upper bound is 0xfff, i.e. the last word (0xfff)
	 * is not printed — confirm whether "<=" was intended. */
	for (i = 0; i < 0xfff; i++) {
		/*same as .o format*/
		pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
	}
}
4955
/*
 * Dump AMRISC local memory to the kernel log via the indirect window.
 * Bank selector 2 in register 0x31d appears to select lmem — TODO
 * confirm against the amrisc documentation.
 */
static void dump_lmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 2);
	pr_info("start dump amrisc lmem\n");
	/* NOTE(review): bound is 0x3ff exclusive, so the last word is not
	 * printed — confirm whether "<=" was intended. */
	for (i = 0; i < 0x3ff; i++) {
		/*same as */
		pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
	}
}
4968
dump_risc_mem_show(struct class * class,struct class_attribute * attr,char * buf)4969 static ssize_t dump_risc_mem_show(struct class *class,
4970 struct class_attribute *attr, char *buf)
4971 {
4972 char *pbuf = buf;
4973 int ret;
4974
4975 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
4976 mutex_lock(&vdec_mutex);
4977 if (!vdec_on(VDEC_1)) {
4978 mutex_unlock(&vdec_mutex);
4979 pbuf += sprintf(pbuf, "amrisc is power off\n");
4980 ret = pbuf - buf;
4981 return ret;
4982 }
4983 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4984 /*TODO:M6 define */
4985 /*
4986 * switch_mod_gate_by_type(MOD_VDEC, 1);
4987 */
4988 amports_switch_gate("vdec", 1);
4989 }
4990 /*start do**/
4991 switch (dump_mode) {
4992 case 0:
4993 dump_pmem();
4994 break;
4995 case 2:
4996 dump_lmem();
4997 break;
4998 default:
4999 break;
5000 }
5001
5002 /*done*/
5003 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
5004 mutex_unlock(&vdec_mutex);
5005 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
5006 /*TODO:M6 define */
5007 /*
5008 * switch_mod_gate_by_type(MOD_VDEC, 0);
5009 */
5010 amports_switch_gate("vdec", 0);
5011 }
5012 return sprintf(buf, "done\n");
5013 }
5014
core_show(struct class * class,struct class_attribute * attr,char * buf)5015 static ssize_t core_show(struct class *class, struct class_attribute *attr,
5016 char *buf)
5017 {
5018 struct vdec_core_s *core = vdec_core;
5019 char *pbuf = buf;
5020 unsigned long flags = vdec_core_lock(vdec_core);
5021
5022 if (list_empty(&core->connected_vdec_list))
5023 pbuf += sprintf(pbuf, "connected vdec list empty\n");
5024 else {
5025 struct vdec_s *vdec;
5026
5027 pbuf += sprintf(pbuf,
5028 " Core: last_sched %p, sched_mask %lx\n",
5029 core->last_vdec,
5030 core->sched_mask);
5031
5032 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
5033 pbuf += sprintf(pbuf,
5034 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
5035 vdec->id,
5036 vdec,
5037 vdec_device_name[vdec->format * 2],
5038 vdec_status_str(vdec),
5039 vdec_type_str(vdec),
5040 vdec->active_mask);
5041 }
5042 }
5043
5044 vdec_core_unlock(vdec_core, flags);
5045 return pbuf - buf;
5046 }
5047
/*
 * sysfs "vdec_status" reader: for every connected or active decoder
 * instance, query vdec_status() and print a per-channel statistics
 * table (resolution, rates, frame/error counters, totals).
 */
static ssize_t vdec_status_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct vdec_s *vdec;
	struct vdec_info vs;
	unsigned char vdec_num = 0;	/* printed channel index */
	struct vdec_core_s *core = vdec_core;
	unsigned long flags = vdec_core_lock(vdec_core);

	if (list_empty(&core->connected_vdec_list)) {
		pbuf += sprintf(pbuf, "No vdec.\n");
		goto out;
	}

	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		if ((vdec->status == VDEC_STATUS_CONNECTED
			|| vdec->status == VDEC_STATUS_ACTIVE)) {
			memset(&vs, 0, sizeof(vs));
			/* A failing status query aborts the whole listing. */
			if (vdec_status(vdec, &vs)) {
				pbuf += sprintf(pbuf, "err.\n");
				goto out;
			}
			pbuf += sprintf(pbuf,
				"vdec channel %u statistics:\n",
				vdec_num);
			pbuf += sprintf(pbuf,
				"%13s : %s\n", "device name",
				vs.vdec_name);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame width",
				vs.frame_width);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame height",
				vs.frame_height);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame rate",
				vs.frame_rate, "fps");
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "bit rate",
				vs.bit_rate / 1024 * 8, "kbps");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "status",
				vs.status);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame dur",
				vs.frame_dur);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame data",
				vs.frame_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame count",
				vs.frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "drop count",
				vs.drop_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "fra err count",
				vs.error_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "hw err count",
				vs.error_count);
			pbuf += sprintf(pbuf,
				"%13s : %llu %s\n", "total data",
				vs.total_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %x\n\n", "ratio_control",
				vs.ratio_control);

			vdec_num++;
		}
	}
out:
	vdec_core_unlock(vdec_core, flags);
	return pbuf - buf;
}
5124
dump_vdec_blocks_show(struct class * class,struct class_attribute * attr,char * buf)5125 static ssize_t dump_vdec_blocks_show(struct class *class,
5126 struct class_attribute *attr, char *buf)
5127 {
5128 struct vdec_core_s *core = vdec_core;
5129 char *pbuf = buf;
5130 unsigned long flags = vdec_core_lock(vdec_core);
5131
5132 if (list_empty(&core->connected_vdec_list))
5133 pbuf += sprintf(pbuf, "connected vdec list empty\n");
5134 else {
5135 struct vdec_s *vdec;
5136 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
5137 pbuf += vdec_input_dump_blocks(&vdec->input,
5138 pbuf, PAGE_SIZE - (pbuf - buf));
5139 }
5140 }
5141 vdec_core_unlock(vdec_core, flags);
5142
5143 return pbuf - buf;
5144 }
dump_vdec_chunks_show(struct class * class,struct class_attribute * attr,char * buf)5145 static ssize_t dump_vdec_chunks_show(struct class *class,
5146 struct class_attribute *attr, char *buf)
5147 {
5148 struct vdec_core_s *core = vdec_core;
5149 char *pbuf = buf;
5150 unsigned long flags = vdec_core_lock(vdec_core);
5151
5152 if (list_empty(&core->connected_vdec_list))
5153 pbuf += sprintf(pbuf, "connected vdec list empty\n");
5154 else {
5155 struct vdec_s *vdec;
5156 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
5157 pbuf += vdec_input_dump_chunks(vdec->id, &vdec->input,
5158 pbuf, PAGE_SIZE - (pbuf - buf));
5159 }
5160 }
5161 vdec_core_unlock(vdec_core, flags);
5162
5163 return pbuf - buf;
5164 }
5165
dump_decoder_state_show(struct class * class,struct class_attribute * attr,char * buf)5166 static ssize_t dump_decoder_state_show(struct class *class,
5167 struct class_attribute *attr, char *buf)
5168 {
5169 char *pbuf = buf;
5170 struct vdec_s *vdec;
5171 struct vdec_core_s *core = vdec_core;
5172 unsigned long flags = vdec_core_lock(vdec_core);
5173
5174 if (list_empty(&core->connected_vdec_list)) {
5175 pbuf += sprintf(pbuf, "No vdec.\n");
5176 } else {
5177 list_for_each_entry(vdec,
5178 &core->connected_vdec_list, list) {
5179 if ((vdec->status == VDEC_STATUS_CONNECTED
5180 || vdec->status == VDEC_STATUS_ACTIVE)
5181 && vdec->dump_state)
5182 vdec->dump_state(vdec);
5183 }
5184 }
5185 vdec_core_unlock(vdec_core, flags);
5186
5187 return pbuf - buf;
5188 }
5189
dump_fps_show(struct class * class,struct class_attribute * attr,char * buf)5190 static ssize_t dump_fps_show(struct class *class,
5191 struct class_attribute *attr, char *buf)
5192 {
5193 char *pbuf = buf;
5194 struct vdec_core_s *core = vdec_core;
5195 int i;
5196
5197 unsigned long flags = vdec_fps_lock(vdec_core);
5198 for (i = 0; i < MAX_INSTANCE_MUN; i++)
5199 pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps);
5200
5201 pbuf += sprintf(pbuf, "\n");
5202 vdec_fps_unlock(vdec_core, flags);
5203
5204 return pbuf - buf;
5205 }
5206
5207
/* sysfs attributes under /sys/class/vdec/ (RO = show only, RW = both). */
static CLASS_ATTR_RO(amrisc_regs);
static CLASS_ATTR_RO(dump_trace);
static CLASS_ATTR_RO(clock_level);
static CLASS_ATTR_RW(enable_mvdec_info);
static CLASS_ATTR_RW(poweron_clock_level);
static CLASS_ATTR_RW(dump_risc_mem);
static CLASS_ATTR_RW(keep_vdec_mem);
static CLASS_ATTR_RO(core);
static CLASS_ATTR_RO(vdec_status);
static CLASS_ATTR_RO(dump_vdec_blocks);
static CLASS_ATTR_RO(dump_vdec_chunks);
static CLASS_ATTR_RO(dump_decoder_state);
#ifdef VDEC_DEBUG_SUPPORT
static CLASS_ATTR_RW(debug);
#endif
#ifdef FRAME_CHECK
static CLASS_ATTR_RW(dump_yuv);
static CLASS_ATTR_RW(frame_check);
#endif
static CLASS_ATTR_RO(dump_fps);
static CLASS_ATTR_RW(vfm_path);

/* Attribute table published through ATTRIBUTE_GROUPS() below. */
static struct attribute *vdec_class_attrs[] = {
	&class_attr_amrisc_regs.attr,
	&class_attr_dump_trace.attr,
	&class_attr_clock_level.attr,
	&class_attr_enable_mvdec_info.attr,
	&class_attr_poweron_clock_level.attr,
	&class_attr_dump_risc_mem.attr,
	&class_attr_keep_vdec_mem.attr,
	&class_attr_core.attr,
	&class_attr_vdec_status.attr,
	&class_attr_dump_vdec_blocks.attr,
	&class_attr_dump_vdec_chunks.attr,
	&class_attr_dump_decoder_state.attr,
#ifdef VDEC_DEBUG_SUPPORT
	&class_attr_debug.attr,
#endif
#ifdef FRAME_CHECK
	&class_attr_dump_yuv.attr,
	&class_attr_frame_check.attr,
#endif
	&class_attr_dump_fps.attr,
	&class_attr_vfm_path.attr,
	NULL
};

ATTRIBUTE_GROUPS(vdec_class);

/* The /sys/class/vdec class itself, registered in vdec_probe(). */
static struct class vdec_class = {
	.name = "vdec",
	.class_groups = vdec_class_groups,
};
5261
/* Return the struct device of the vdec core platform device. */
struct device *get_vdec_device(void)
{
	return &vdec_core->vdec_core_platform_device->dev;
}
5266 EXPORT_SYMBOL(get_vdec_device);
5267
vdec_probe(struct platform_device * pdev)5268 static int vdec_probe(struct platform_device *pdev)
5269 {
5270 s32 i, r;
5271
5272 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
5273 sizeof(struct vdec_core_s), GFP_KERNEL);
5274 if (vdec_core == NULL) {
5275 pr_err("vdec core allocation failed.\n");
5276 return -ENOMEM;
5277 }
5278
5279 atomic_set(&vdec_core->vdec_nr, 0);
5280 sema_init(&vdec_core->sem, 1);
5281
5282 r = class_register(&vdec_class);
5283 if (r) {
5284 pr_info("vdec class create fail.\n");
5285 return r;
5286 }
5287
5288 vdec_core->vdec_core_platform_device = pdev;
5289
5290 platform_set_drvdata(pdev, vdec_core);
5291
5292 for (i = 0; i < VDEC_IRQ_MAX; i++) {
5293 vdec_core->isr_context[i].index = i;
5294 vdec_core->isr_context[i].irq = -1;
5295 }
5296
5297 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
5298 IRQF_ONESHOT, "vdec-0", NULL);
5299 if (r < 0) {
5300 pr_err("vdec interrupt request failed\n");
5301 return r;
5302 }
5303
5304 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
5305 IRQF_ONESHOT, "vdec-1", NULL);
5306 if (r < 0) {
5307 pr_err("vdec interrupt request failed\n");
5308 return r;
5309 }
5310 #if 0
5311 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
5312 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
5313 IRQF_ONESHOT, "vdec-hevc_back", NULL);
5314 if (r < 0) {
5315 pr_err("vdec interrupt request failed\n");
5316 return r;
5317 }
5318 }
5319 #endif
5320 r = of_reserved_mem_device_init(&pdev->dev);
5321 if (r == 0)
5322 pr_info("vdec_probe done\n");
5323
5324 vdec_core->cma_dev = &pdev->dev;
5325
5326 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
5327 /* default to 250MHz */
5328 vdec_clock_hi_enable();
5329 }
5330
5331 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
5332 /* set vdec dmc request to urgent */
5333 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
5334 }
5335 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
5336 spin_lock_init(&vdec_core->lock);
5337 spin_lock_init(&vdec_core->canvas_lock);
5338 spin_lock_init(&vdec_core->fps_lock);
5339 spin_lock_init(&vdec_core->input_lock);
5340 ida_init(&vdec_core->ida);
5341 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
5342 "vdec-core");
5343
5344 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s",__WQ_LEGACY |
5345 WQ_MEM_RECLAIM |WQ_HIGHPRI/*high priority*/, "vdec-work");
5346 /*work queue priority lower than vdec-core.*/
5347 return 0;
5348 }
5349
vdec_remove(struct platform_device * pdev)5350 static int vdec_remove(struct platform_device *pdev)
5351 {
5352 int i;
5353
5354 for (i = 0; i < VDEC_IRQ_MAX; i++) {
5355 if (vdec_core->isr_context[i].irq >= 0) {
5356 free_irq(vdec_core->isr_context[i].irq,
5357 &vdec_core->isr_context[i]);
5358 vdec_core->isr_context[i].irq = -1;
5359 vdec_core->isr_context[i].dev_isr = NULL;
5360 vdec_core->isr_context[i].dev_threaded_isr = NULL;
5361 vdec_core->isr_context[i].dev_id = NULL;
5362 }
5363 }
5364
5365 kthread_stop(vdec_core->thread);
5366
5367 destroy_workqueue(vdec_core->vdec_core_wq);
5368 class_unregister(&vdec_class);
5369
5370 return 0;
5371 }
5372
/* Device-tree match table: binds this driver to "amlogic, vdec" nodes. */
static const struct of_device_id amlogic_vdec_dt_match[] = {
	{
		.compatible = "amlogic, vdec",
	},
	{},
};

/* Tunables exported through the mconfig interface (see vdec_module_init). */
static struct mconfig vdec_configs[] = {
	MC_PU32("debug_trace_num", &debug_trace_num),
	MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
	MC_PU32("clk_config", &clk_config),
	MC_PI32("step_mode", &step_mode),
	MC_PI32("poweron_clock_level", &poweron_clock_level),
};
static struct mconfig_node vdec_node;

static struct platform_driver vdec_driver = {
	.probe = vdec_probe,
	.remove = vdec_remove,
	.driver = {
		.name = "vdec",
		.of_match_table = amlogic_vdec_dt_match,
	}
};

/* Advertised codec capability: vdec input supports DRM frame mode. */
static struct codec_profile_t amvdec_input_profile = {
	.name = "vdec_input",
	.profile = "drm_framemode"
};
5402
vdec_module_init(void)5403 int vdec_module_init(void)
5404 {
5405 if (platform_driver_register(&vdec_driver)) {
5406 pr_info("failed to register vdec module\n");
5407 return -ENODEV;
5408 }
5409 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
5410 "vdec", vdec_configs, CONFIG_FOR_RW);
5411 vcodec_profile_register(&amvdec_input_profile);
5412 return 0;
5413 }
5414 EXPORT_SYMBOL(vdec_module_init);
5415
/* Module teardown: unregister the vdec platform driver. */
void vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
5421
#if 0
/*
 * Dead code kept for reference: the former module_init()/module_exit()
 * entry points.  The exported vdec_module_init()/vdec_module_exit()
 * above are used instead.
 */
static int __init vdec_module_init(void)
{
	if (platform_driver_register(&vdec_driver)) {
		pr_info("failed to register vdec module\n");
		return -ENODEV;
	}
	INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
		"vdec", vdec_configs, CONFIG_FOR_RW);
	return 0;
}

static void __exit vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
#endif
5439
/*
 * vdec_mem_device_init() - reserved-memory device_init hook; remembers
 * the device that attached to the "amlogic, vdec-memory" region so it
 * can be used as the CMA allocation device.
 * @rmem: reserved-memory region (unused here).
 * @dev:  device to record in vdec_core->cma_dev.
 *
 * Return: always 0.
 */
static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
{
	vdec_core->cma_dev = dev;

	return 0;
}
5446
/* Reserved-memory callbacks: only device_init is needed. */
static const struct reserved_mem_ops rmem_vdec_ops = {
	.device_init = vdec_mem_device_init,
};
5450
/*
 * vdec_mem_setup() - early-boot setup for the "amlogic, vdec-memory"
 * node (see RESERVEDMEM_OF_DECLARE() below); installs rmem_vdec_ops so
 * vdec_mem_device_init() runs when a device attaches to the region.
 * @rmem: the reserved-memory region being set up.
 *
 * Return: always 0.
 */
static int __init vdec_mem_setup(struct reserved_mem *rmem)
{
	rmem->ops = &rmem_vdec_ops;
	pr_info("vdec: reserved mem setup\n");

	return 0;
}
5458
5459
vdec_set_vframe_comm(struct vdec_s * vdec,char * n)5460 void vdec_set_vframe_comm(struct vdec_s *vdec, char *n)
5461 {
5462 struct vdec_frames_s *mvfrm = vdec->mvfrm;
5463
5464 if (!mvfrm)
5465 return;
5466
5467 mvfrm->comm.vdec_id = vdec->id;
5468
5469 snprintf(mvfrm->comm.vdec_name, sizeof(mvfrm->comm.vdec_name)-1,
5470 "%s", n);
5471 mvfrm->comm.vdec_type = vdec->type;
5472 }
5473 EXPORT_SYMBOL(vdec_set_vframe_comm);
5474
/*
 * vdec_fill_vdec_frame() - append one per-frame record (QoS stats,
 * stream info, vframe fields, HW decode time) to the instance's
 * fifo_buf ring.
 * @vdec:        decoder instance; no-op if it has no ->mvfrm.
 * @vframe_qos:  optional QoS stats copied into the slot (may be NULL).
 * @vinfo:       optional stream info; the contiguous field range from
 *               ->frame_width up to (excluding) ->reserved[0] is
 *               block-copied (may be NULL).
 * @vf:          optional vframe whose type/signal_type/pts/pts_us64 are
 *               recorded (may be NULL).
 * @hw_dec_time: hardware decode duration for this frame.
 *
 * NOTE(review): ->wr is a free-running write index reduced modulo
 * NUM_FRAME_VDEC (power of two) via masking; no locking is taken here,
 * presumably a single producer per instance — confirm against callers.
 */
void vdec_fill_vdec_frame(struct vdec_s *vdec, struct vframe_qos_s *vframe_qos,
				struct vdec_info *vinfo,struct vframe_s *vf,
				u32 hw_dec_time)
{
	u32 i;
	struct vframe_counter_s *fifo_buf;
	struct vdec_frames_s *mvfrm = vdec->mvfrm;

	if (!mvfrm)
		return;
	fifo_buf = mvfrm->fifo_buf;

	/* assume fps==60,mv->wr max value can support system running 828 days,
	   this is enough for us */
	i = mvfrm->wr & (NUM_FRAME_VDEC-1); //find the slot num in fifo_buf
	mvfrm->fifo_buf[i].decode_time_cost = hw_dec_time;
	if (vframe_qos)
		memcpy(&fifo_buf[i].qos, vframe_qos, sizeof(struct vframe_qos_s));
	if (vinfo) {
		/* bulk-copy the field span [frame_width, reserved[0]) in one go */
		memcpy(&fifo_buf[i].frame_width, &vinfo->frame_width,
			((char*)&vinfo->reserved[0] - (char*)&vinfo->frame_width));
	}
	if (vf) {
		fifo_buf[i].vf_type = vf->type;
		fifo_buf[i].signal_type = vf->signal_type;
		fifo_buf[i].pts = vf->pts;
		fifo_buf[i].pts_us64 = vf->pts_us64;
	}
	/* publish the slot by advancing the free-running write index */
	mvfrm->wr++;
}
EXPORT_SYMBOL(vdec_fill_vdec_frame);
5506
/*
 * vdec_get_frame_vdec() - drain up to QOS_FRAME_NUM frame records from
 * the instance's fifo_buf ring into a caller-supplied kernel buffer.
 * @vdec:   decoder instance (NULL tolerated; returns 0).
 * @tmpbuf: destination array; must hold at least QOS_FRAME_NUM entries.
 *
 * copy_to_user() may sleep and could block vdec_fill_vdec_frame(), so
 * this function only fills a kernel temp buffer; the caller performs
 * the user-space copy itself.
 *
 * If the writer has lapped the reader (more than
 * NUM_FRAME_VDEC - QOS_FRAME_NUM entries pending), the read index is
 * advanced so only the most recent entries are kept, leaving
 * QOS_FRAME_NUM slots of headroom for the concurrent producer.
 *
 * Return: number of entries copied into @tmpbuf (0..QOS_FRAME_NUM).
 */
u32 vdec_get_frame_vdec(struct vdec_s *vdec, struct vframe_counter_s *tmpbuf)
{
	u32 toread = 0;
	u32 slot_rd;
	struct vframe_counter_s *fifo_buf = NULL;
	struct vdec_frames_s *mvfrm = NULL;

	if (!vdec)
		return 0;
	mvfrm = vdec->mvfrm;
	if (!mvfrm)
		return 0;

	fifo_buf = &mvfrm->fifo_buf[0];

	/* wr/rd are free-running; their difference is the pending count */
	toread = mvfrm->wr - mvfrm->rd;
	if (toread) {
		if (toread >= NUM_FRAME_VDEC - QOS_FRAME_NUM) {
			/* round the fifo_buf length happens, give QOS_FRAME_NUM for buffer */
			mvfrm->rd = mvfrm->wr - (NUM_FRAME_VDEC - QOS_FRAME_NUM);
		}

		if (toread >= QOS_FRAME_NUM) {
			toread = QOS_FRAME_NUM; //by default, we use this num
		}

		slot_rd = mvfrm->rd &( NUM_FRAME_VDEC-1); //In this case it equals to x%y
		if (slot_rd + toread <= NUM_FRAME_VDEC) {
			memcpy(tmpbuf, &fifo_buf[slot_rd], toread*sizeof(struct vframe_counter_s));
		} else {
			/* read wraps past the end of the ring: tail first, then head */
			u32 exeed;
			exeed = slot_rd + toread - NUM_FRAME_VDEC;
			memcpy(tmpbuf, &fifo_buf[slot_rd], (NUM_FRAME_VDEC - slot_rd)*sizeof(struct vframe_counter_s));
			memcpy(&tmpbuf[NUM_FRAME_VDEC-slot_rd], &fifo_buf[0], exeed*sizeof(struct vframe_counter_s));
		}

		mvfrm->rd += toread;
	}
	return toread;
}
EXPORT_SYMBOL(vdec_get_frame_vdec);
5562
/* Hook vdec_mem_setup() to the "amlogic, vdec-memory" DT reserved region. */
RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
/*
uint force_hevc_clock_cntl;
EXPORT_SYMBOL(force_hevc_clock_cntl);

module_param(force_hevc_clock_cntl, uint, 0664);
*/
/*
 * Runtime-tunable module parameters; several are also reachable
 * through the vdec_configs node registered in vdec_module_init().
 */
module_param(debug, uint, 0664);
module_param(debug_trace_num, uint, 0664);
module_param(hevc_max_reset_count, int, 0664);
module_param(clk_config, uint, 0664);
module_param(step_mode, int, 0664);
module_param(debugflags, int, 0664);
module_param(parallel_decode, int, 0664);
module_param(fps_detection, int, 0664);
module_param(fps_clear, int, 0664);
module_param(force_nosecure_even_drm, int, 0664);
module_param(disable_switch_single_to_mult, int, 0664);

module_param(frameinfo_flag, int, 0664);
MODULE_PARM_DESC(frameinfo_flag,
	"\n frameinfo_flag\n");
module_param(v4lvideo_add_di, int, 0664);
MODULE_PARM_DESC(v4lvideo_add_di,
	"\n v4lvideo_add_di\n");

module_param(max_di_instance, int, 0664);
MODULE_PARM_DESC(max_di_instance,
	"\n max_di_instance\n");

module_param(debug_vdetect, int, 0664);
MODULE_PARM_DESC(debug_vdetect, "\n debug_vdetect\n");

module_param(enable_stream_mode_multi_dec, int, 0664);
EXPORT_SYMBOL(enable_stream_mode_multi_dec);
MODULE_PARM_DESC(enable_stream_mode_multi_dec,
	"\n enable multi-decoding on stream mode. \n");
/*
 *module_init(vdec_module_init);
 *module_exit(vdec_module_exit);
 */
/* Instantiate the trace points declared in vdec_trace.h exactly once. */
#define CREATE_TRACE_POINTS
#include "vdec_trace.h"
MODULE_DESCRIPTION("AMLOGIC vdec driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
5609