• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *    Alpha Lin, alpha.lin@rock-chips.com
7  *    Randy Li, randy.li@rock-chips.com
8  *    Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <asm/cacheflush.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/iopoll.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/of_platform.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/regmap.h>
22 #include <linux/proc_fs.h>
23 #include <soc/rockchip/pm_domains.h>
24 
25 #include "mpp_debug.h"
26 #include "mpp_common.h"
27 #include "mpp_iommu.h"
28 
29 #define VDPU1_DRIVER_NAME "mpp_vdpu1"
30 
31 #define VDPU1_SESSION_MAX_BUFFERS 40
32 /* The maximum registers number of all the version */
33 #define VDPU1_REG_NUM 60
34 #define VDPU1_REG_HW_ID_INDEX 0
35 #define VDPU1_REG_START_INDEX 0
36 #define VDPU1_REG_END_INDEX 59
37 
38 #define VDPU1_REG_PP_NUM 101
39 #define VDPU1_REG_PP_START_INDEX 0
40 #define VDPU1_REG_PP_END_INDEX 100
41 
42 #define VDPU1_REG_DEC_INT_EN 0x004
43 #define VDPU1_REG_DEC_INT_EN_INDEX (1)
44 /* B slice detected, used in 8190 decoder and later */
45 #define VDPU1_INT_PIC_INF BIT(24)
46 #define VDPU1_INT_TIMEOUT BIT(18)
47 #define VDPU1_INT_SLICE BIT(17)
48 #define VDPU1_INT_STRM_ERROR BIT(16)
49 #define VDPU1_INT_ASO_ERROR BIT(15)
50 #define VDPU1_INT_BUF_EMPTY BIT(14)
51 #define VDPU1_INT_BUS_ERROR BIT(13)
52 #define VDPU1_DEC_INT BIT(12)
53 #define VDPU1_DEC_INT_RAW BIT(8)
54 #define VDPU1_DEC_IRQ_DIS BIT(4)
55 #define VDPU1_DEC_START BIT(0)
56 
57 /* NOTE: Don't enable it or decoding AVC would meet problem at rk3288 */
58 #define VDPU1_REG_DEC_EN 0x008
59 #define VDPU1_CLOCK_GATE_EN BIT(10)
60 
61 #define VDPU1_REG_SYS_CTRL 0x00c
62 #define VDPU1_REG_SYS_CTRL_INDEX (3)
63 #define VDPU1_RGE_WIDTH_INDEX (4)
64 #define VDPU1_GET_FORMAT(x) (((x) >> 28) & 0xf)
65 #define VDPU1_GET_PROD_NUM(x) (((x) >> 16) & 0xffff)
66 #define VDPU1_GET_WIDTH(x) (((x)&0xff800000) >> 19)
67 #define VDPU1_FMT_H264D 0
68 #define VDPU1_FMT_MPEG4D 1
69 #define VDPU1_FMT_H263D 2
70 #define VDPU1_FMT_JPEGD 3
71 #define VDPU1_FMT_VC1D 4
72 #define VDPU1_FMT_MPEG2D 5
73 #define VDPU1_FMT_MPEG1D 6
74 #define VDPU1_FMT_VP6D 7
75 #define VDPU1_FMT_RESERVED 8
76 #define VDPU1_FMT_VP7D 9
77 #define VDPU1_FMT_VP8D 10
78 #define VDPU1_FMT_AVSD 11
79 
80 #define VDPU1_REG_STREAM_RLC_BASE 0x030
81 #define VDPU1_REG_STREAM_RLC_BASE_INDEX (12)
82 
83 #define VDPU1_REG_DIR_MV_BASE 0x0a4
84 #define VDPU1_REG_DIR_MV_BASE_INDEX (41)
85 
86 #define VDPU1_REG_CLR_CACHE_BASE 0x810
87 
88 #define to_vdpu_task(task) container_of(task, struct vdpu_task, mpp_task)
89 #define to_vdpu_dev(dev) container_of(dev, struct vdpu_dev, mpp)
90 
/*
 * Hardware ID values read back from reg[VDPU1_REG_HW_ID_INDEX].
 * NOTE(review): tag is spelled "VPUD1" (not "VDPU1") in the original;
 * kept as-is since other translation units may reference it.
 */
enum VPUD1_HW_ID {
    VDPU1_ID_0102 = 0x0102,
    VDPU1_ID_9190 = 0x6731,
};
95 
/*
 * Per-job context for one decode task: a snapshot of the register set
 * plus the read/write requests extracted from the userspace messages.
 */
struct vdpu_task {
    struct mpp_task mpp_task;
    /* enable of post process */
    bool pp_enable;

    /* clock mode for this job; may be raised by the *_get_freq hooks */
    enum MPP_CLOCK_MODE clk_mode;
    /* register image, sized for the larger post-process layout */
    u32 reg[VDPU1_REG_PP_NUM];

    /* fd + offset translation info from MPP_CMD_SET_REG_ADDR_OFFSET */
    struct reg_offset_info off_inf;
    /* stream RLC base captured at alloc; used to derive decoded length */
    u32 strm_addr;
    /* interrupt status latched by the ISR for this task */
    u32 irq_status;
    /* req for current task */
    u32 w_req_cnt;
    struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
    u32 r_req_cnt;
    struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};
113 
/* Per-device state wrapping the common mpp_dev with VDPU1 clocks/resets. */
struct vdpu_dev {
    struct mpp_dev mpp;

    /* AXI and AHB clock handles obtained in vdpu_init() */
    struct mpp_clk_info aclk_info;
    struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
    /* procfs directory created in vdpu_procfs_init(); NULL when absent */
    struct proc_dir_entry *procfs;
#endif
    /* soft resets for the AXI and AHB domains; may be NULL if not in DT */
    struct reset_control *rst_a;
    struct reset_control *rst_h;
};
125 
/* Register layout for the plain decoder (60 registers). */
static struct mpp_hw_info vdpu_v1_hw_info = {
    .reg_num = VDPU1_REG_NUM,
    .reg_id = VDPU1_REG_HW_ID_INDEX,
    .reg_start = VDPU1_REG_START_INDEX,
    .reg_end = VDPU1_REG_END_INDEX,
    .reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};

/* Register layout when the post-processor path is used (101 registers). */
static struct mpp_hw_info vdpu_pp_v1_hw_info = {
    .reg_num = VDPU1_REG_PP_NUM,
    .reg_id = VDPU1_REG_HW_ID_INDEX,
    .reg_start = VDPU1_REG_PP_START_INDEX,
    .reg_end = VDPU1_REG_PP_END_INDEX,
    .reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};
141 
142 /*
143  * file handle translate information
144  */
/* Each table lists the register indices that carry a packed buffer fd
 * (and optional offset) which must be translated to an IOVA before the
 * job is written to hardware. One table per stream format.
 */
static const u16 trans_tbl_avsd[] = {12, 13, 14, 15, 16, 17, 40, 41, 45};

static const u16 trans_tbl_default[] = {12, 13, 14, 15, 16, 17, 40, 41};

static const u16 trans_tbl_jpegd[] = {12, 13, 14, 40, 66, 67};

static const u16 trans_tbl_h264d[] = {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 40};

static const u16 trans_tbl_vc1d[] = {12, 13, 14, 15, 16, 17, 27, 41};

static const u16 trans_tbl_vp6d[] = {12, 13, 14, 18, 27, 40};

static const u16 trans_tbl_vp8d[] = {10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40};
158 
/*
 * Format-indexed map (VDPU1_FMT_*) from stream format to the fd
 * translation table used by mpp_translate_reg_address().
 */
static struct mpp_trans_info vdpu_v1_trans[] = {
    [VDPU1_FMT_H264D] =
        {
            .count = ARRAY_SIZE(trans_tbl_h264d),
            .table = trans_tbl_h264d,
        },
    [VDPU1_FMT_H263D] =
        {
            .count = ARRAY_SIZE(trans_tbl_default),
            .table = trans_tbl_default,
        },
    [VDPU1_FMT_MPEG4D] =
        {
            .count = ARRAY_SIZE(trans_tbl_default),
            .table = trans_tbl_default,
        },
    [VDPU1_FMT_JPEGD] =
        {
            .count = ARRAY_SIZE(trans_tbl_jpegd),
            .table = trans_tbl_jpegd,
        },
    [VDPU1_FMT_VC1D] =
        {
            .count = ARRAY_SIZE(trans_tbl_vc1d),
            .table = trans_tbl_vc1d,
        },
    [VDPU1_FMT_MPEG2D] =
        {
            .count = ARRAY_SIZE(trans_tbl_default),
            .table = trans_tbl_default,
        },
    [VDPU1_FMT_MPEG1D] =
        {
            .count = ARRAY_SIZE(trans_tbl_default),
            .table = trans_tbl_default,
        },
    [VDPU1_FMT_VP6D] =
        {
            .count = ARRAY_SIZE(trans_tbl_vp6d),
            .table = trans_tbl_vp6d,
        },
    /* reserved slot: no translation performed */
    [VDPU1_FMT_RESERVED] =
        {
            .count = 0,
            .table = NULL,
        },
    [VDPU1_FMT_VP7D] =
        {
            .count = ARRAY_SIZE(trans_tbl_default),
            .table = trans_tbl_default,
        },
    [VDPU1_FMT_VP8D] =
        {
            .count = ARRAY_SIZE(trans_tbl_vp8d),
            .table = trans_tbl_vp8d,
        },
    [VDPU1_FMT_AVSD] =
        {
            .count = ARRAY_SIZE(trans_tbl_avsd),
            .table = trans_tbl_avsd,
        },
};
221 
/*
 * Translate the buffer file descriptors packed into the task registers
 * into device addresses (IOVAs).
 *
 * Returns 0 on success, a negative errno from the generic translation,
 * or -EFAULT when attaching the H.264 direct-MV buffer fd fails.
 */
static int vdpu_process_reg_fd(struct mpp_session *session, struct vdpu_task *task, struct mpp_task_msgs *msgs)
{
    int ret = 0;
    /* stream format lives in bits 31:28 of the SYS_CTRL register */
    int fmt = VDPU1_GET_FORMAT(task->reg[VDPU1_REG_SYS_CTRL_INDEX]);

    ret = mpp_translate_reg_address(session, &task->mpp_task, fmt, task->reg, &task->off_inf);
    if (ret) {
        return ret;
    }
    /*
     * special offset scale case
     *
     * This translation is for fd + offset translation.
     * One register has 32bits. We need to transfer both buffer file
     * handle and the start address offset so we packet file handle
     * and offset together using below format.
     *
     *  0~9  bit for buffer file handle range 0 ~ 1023
     * 10~31 bit for offset range 0 ~ 4M
     *
     * But on 4K case the offset can be larger the 4M
     */
    if (likely(fmt == VDPU1_FMT_H264D)) {
        int fd;
        u32 offset;
        dma_addr_t iova = 0;
        u32 idx = VDPU1_REG_DIR_MV_BASE_INDEX;
        struct mpp_mem_region *mem_region = NULL;

        if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
            /* register holds a bare fd, no packed offset */
            fd = task->reg[idx];
            offset = 0;
        } else {
            /* unpack: low 10 bits = fd, offset = (reg >> 10) * 16 bytes */
            fd = task->reg[idx] & 0x3ff;
            offset = task->reg[idx] >> 0xA << 0x4;
        }
        mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
        if (IS_ERR(mem_region)) {
            goto fail;
        }

        iova = mem_region->iova;
        mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n", idx, fd, &iova, offset);
        /* replace the packed fd+offset with the final device address */
        task->reg[idx] = iova + offset;
    }

    mpp_translate_reg_offset_info(&task->mpp_task, &task->off_inf, task->reg);
    return 0;
fail:
    return -EFAULT;
}
273 
/*
 * Walk the userspace request list and split it into register writes
 * (copied into task->reg and recorded in w_reqs), register reads
 * (recorded in r_reqs for read-back after the job) and address-offset
 * info. Returns 0 on success, -EIO if copying register data fails.
 */
static int vdpu_extract_task_msg(struct vdpu_task *task, struct mpp_task_msgs *msgs)
{
    u32 i;
    int ret;
    struct mpp_request *req;
    struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

    for (i = 0; i < msgs->req_cnt; i++) {
        u32 off_s, off_e;

        req = &msgs->reqs[i];
        if (!req->size) {
            continue;
        }

        switch (req->cmd) {
            case MPP_CMD_SET_REG_WRITE: {
                /* valid window [reg_start, reg_end], in byte offsets */
                off_s = hw_info->reg_start * sizeof(u32);
                off_e = hw_info->reg_end * sizeof(u32);
                /* out-of-window requests are silently skipped, not errors */
                ret = mpp_check_req(req, 0, sizeof(task->reg), off_s, off_e);
                if (ret) {
                    continue;
                }
                if (copy_from_user((u8 *)task->reg + req->offset, req->data, req->size)) {
                    mpp_err("copy_from_user reg failed\n");
                    return -EIO;
                }
                memcpy(&task->w_reqs[task->w_req_cnt++], req, sizeof(*req));
                break;
            }
            case MPP_CMD_SET_REG_READ: {
                off_s = hw_info->reg_start * sizeof(u32);
                off_e = hw_info->reg_end * sizeof(u32);
                ret = mpp_check_req(req, 0, sizeof(task->reg), off_s, off_e);
                if (ret) {
                    continue;
                }
                /* reads are only recorded; data is copied back in vdpu_result() */
                memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
                break;
            }
            case MPP_CMD_SET_REG_ADDR_OFFSET: {
                mpp_extract_reg_offset_info(&task->off_inf, req);
                break;
            }
            default:
                break;
        }
    }
    mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n", task->w_req_cnt, task->r_req_cnt);
    return 0;
}
325 
/*
 * mpp_dev_ops.alloc_task hook: build a vdpu_task from the userspace
 * message set. Selects the post-process register layout for
 * MPP_DEVICE_VDPU1_PP sessions, extracts the requests and translates
 * buffer fds. Returns the embedded mpp_task, or NULL on failure.
 */
static void *vdpu_alloc_task(struct mpp_session *session, struct mpp_task_msgs *msgs)
{
    int ret;
    struct mpp_task *mpp_task = NULL;
    struct vdpu_task *task = NULL;
    struct mpp_dev *mpp = session->mpp;

    mpp_debug_enter();

    task = kzalloc(sizeof(*task), GFP_KERNEL);
    if (!task) {
        return NULL;
    }

    mpp_task = &task->mpp_task;
    mpp_task_init(session, mpp_task);
    if (session->device_type == MPP_DEVICE_VDPU1_PP) {
        task->pp_enable = true;
        mpp_task->hw_info = &vdpu_pp_v1_hw_info;
    } else {
        mpp_task->hw_info = mpp->var->hw_info;
    }
    mpp_task->reg = task->reg;
    /* extract reqs for current task */
    ret = vdpu_extract_task_msg(task, msgs);
    if (ret) {
        goto fail;
    }
    /* process fd in register */
    if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
        ret = vdpu_process_reg_fd(session, task, msgs);
        if (ret) {
            goto fail;
        }
    }
    /* remember the stream base so vdpu_finish() can compute decoded length */
    task->strm_addr = task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX];
    task->clk_mode = CLK_MODE_NORMAL;

    mpp_debug_leave();

    return mpp_task;

fail:
    /* dump state for diagnosis, then undo mpp_task_init() before freeing */
    mpp_task_dump_mem_region(mpp, mpp_task);
    mpp_task_dump_reg(mpp, mpp_task);
    mpp_task_finalize(session, mpp_task);
    kfree(task);
    return NULL;
}
375 
/*
 * mpp_dev_ops.run hook: program the task's register image into the
 * hardware and kick off decoding. The enable register (reg_en) is
 * written last by mpp_write_req(), and a wmb() orders all register
 * writes before the final start write.
 */
static int vdpu_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    u32 i;
    u32 reg_en;
    struct vdpu_task *task = to_vdpu_task(mpp_task);

    mpp_debug_enter();

    /* clear cache */
    mpp_write_relaxed(mpp, VDPU1_REG_CLR_CACHE_BASE, 1);
    /* set registers for hardware */
    reg_en = mpp_task->hw_info->reg_en;
    for (i = 0; i < task->w_req_cnt; i++) {
        struct mpp_request *req = &task->w_reqs[i];
        /* convert byte offset/size into a register index range [s, e) */
        int s = req->offset / sizeof(u32);
        int e = s + req->size / sizeof(u32);

        mpp_write_req(mpp, task->reg, s, e, reg_en);
    }
    /* init current task */
    mpp->cur_task = mpp_task;
    /* Flush the register before the start the device */
    wmb();
    mpp_write(mpp, VDPU1_REG_DEC_INT_EN, task->reg[reg_en] | VDPU1_DEC_START);

    mpp_debug_leave();

    return 0;
}
405 
/*
 * mpp_dev_ops.finish hook: after the job completes, read back the
 * requested registers and restore the fields that were repurposed on
 * the way in (interrupt status, packed stream length).
 */
static int vdpu_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    u32 i;
    u32 s, e;
    u32 dec_get;
    s32 dec_length;
    struct mpp_request *req;
    struct vdpu_task *task = to_vdpu_task(mpp_task);

    mpp_debug_enter();

    /* read register after running */
    for (i = 0; i < task->r_req_cnt; i++) {
        req = &task->r_reqs[i];
        s = req->offset / sizeof(u32);
        e = s + req->size / sizeof(u32);
        mpp_read_req(mpp, task->reg, s, e);
    }
    /* revert hack for irq status */
    task->reg[VDPU1_REG_DEC_INT_EN_INDEX] = task->irq_status;
    /* revert hack for decoded length */
    dec_get = mpp_read_relaxed(mpp, VDPU1_REG_STREAM_RLC_BASE);
    /* consumed bytes = hardware stream pointer - original stream base */
    dec_length = dec_get - task->strm_addr;
    /* pack length into the upper bits, mirroring the fd+offset scheme */
    task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX] = dec_length << 0xA;
    mpp_debug(DEBUG_REGISTER, "dec_get %08x dec_length %d\n", dec_get, dec_length);

    mpp_debug_leave();

    return 0;
}
436 
vdpu_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)437 static int vdpu_result(struct mpp_dev *mpp, struct mpp_task *mpp_task, struct mpp_task_msgs *msgs)
438 {
439     u32 i;
440     struct mpp_request *req;
441     struct vdpu_task *task = to_vdpu_task(mpp_task);
442 
443     /* may overflow the kernel */
444     for (i = 0; i < task->r_req_cnt; i++) {
445         req = &task->r_reqs[i];
446 
447         if (copy_to_user(req->data, (u8 *)task->reg + req->offset, req->size)) {
448             mpp_err("copy_to_user reg fail\n");
449             return -EIO;
450         }
451     }
452 
453     return 0;
454 }
455 
/*
 * mpp_dev_ops.free_task hook: release task resources.
 */
static int vdpu_free_task(struct mpp_session *session, struct mpp_task *mpp_task)
{
    /* Detach buffers and session references first ... */
    mpp_task_finalize(session, mpp_task);
    /* ... then free the containing vdpu_task allocation. */
    kfree(to_vdpu_task(mpp_task));

    return 0;
}
465 
466 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
vdpu_procfs_remove(struct mpp_dev * mpp)467 static int vdpu_procfs_remove(struct mpp_dev *mpp)
468 {
469     struct vdpu_dev *dec = to_vdpu_dev(mpp);
470 
471     if (dec->procfs) {
472         proc_remove(dec->procfs);
473         dec->procfs = NULL;
474     }
475 
476     return 0;
477 }
478 
/*
 * Create a procfs directory named after the DT node under the mpp
 * service directory, exposing the aclk debug rate and the session
 * buffer limit as writable u32 files. Returns -EIO on failure.
 */
static int vdpu_procfs_init(struct mpp_dev *mpp)
{
    struct vdpu_dev *dec = to_vdpu_dev(mpp);

    dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
    if (IS_ERR_OR_NULL(dec->procfs)) {
        mpp_err("failed on open procfs\n");
        /* normalize to NULL so vdpu_procfs_remove() can test it safely */
        dec->procfs = NULL;
        return -EIO;
    }
    mpp_procfs_create_u32("aclk", FILE_RIGHT_644, dec->procfs, &dec->aclk_info.debug_rate_hz);
    mpp_procfs_create_u32("session_buffers", FILE_RIGHT_644, dec->procfs, &mpp->session_max_buffers);

    return 0;
}
494 #else
/* Stubs used when CONFIG_ROCKCHIP_MPP_PROC_FS is disabled. */
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
    return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
    return 0;
}
504 #endif
505 
/*
 * mpp_hw_ops.init hook: look up GRF info, clocks and reset controls
 * from DT. Missing clocks/resets are logged but not fatal; always
 * returns 0.
 */
static int vdpu_init(struct mpp_dev *mpp)
{
    int ret;
    struct vdpu_dev *dec = to_vdpu_dev(mpp);

    mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU1];

    /* Get clock info from dtsi */
    ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
    if (ret) {
        mpp_err("failed on clk_get aclk_vcodec\n");
    }
    ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
    if (ret) {
        mpp_err("failed on clk_get hclk_vcodec\n");
    }
    /* Set default rates: 0x12C = 300, i.e. 300 MHz AXI clock */
    mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 0x12C * MHZ);

    /* Get reset control from dtsi */
    dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
    if (!dec->rst_a) {
        mpp_err("No aclk reset resource define\n");
    }
    dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
    if (!dec->rst_h) {
        mpp_err("No hclk reset resource define\n");
    }

    return 0;
}
537 
vdpu_clk_on(struct mpp_dev * mpp)538 static int vdpu_clk_on(struct mpp_dev *mpp)
539 {
540     struct vdpu_dev *dec = to_vdpu_dev(mpp);
541 
542     mpp_clk_safe_enable(dec->aclk_info.clk);
543     mpp_clk_safe_enable(dec->hclk_info.clk);
544 
545     return 0;
546 }
547 
vdpu_clk_off(struct mpp_dev * mpp)548 static int vdpu_clk_off(struct mpp_dev *mpp)
549 {
550     struct vdpu_dev *dec = to_vdpu_dev(mpp);
551 
552     mpp_clk_safe_disable(dec->aclk_info.clk);
553     mpp_clk_safe_disable(dec->hclk_info.clk);
554 
555     return 0;
556 }
557 
vdpu_3288_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)558 static int vdpu_3288_get_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
559 {
560     u32 width;
561     struct vdpu_task *task = to_vdpu_task(mpp_task);
562 
563     width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
564     if (width > 0xA00) {
565         task->clk_mode = CLK_MODE_ADVANCED;
566     }
567 
568     return 0;
569 }
570 
vdpu_3368_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)571 static int vdpu_3368_get_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
572 {
573     u32 width;
574     struct vdpu_task *task = to_vdpu_task(mpp_task);
575 
576     width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
577     if (width > 0xA00) {
578         task->clk_mode = CLK_MODE_ADVANCED;
579     }
580 
581     return 0;
582 }
583 
vdpu_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)584 static int vdpu_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
585 {
586     struct vdpu_dev *dec = to_vdpu_dev(mpp);
587     struct vdpu_task *task = to_vdpu_task(mpp_task);
588 
589     mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
590 
591     return 0;
592 }
593 
vdpu_reduce_freq(struct mpp_dev * mpp)594 static int vdpu_reduce_freq(struct mpp_dev *mpp)
595 {
596     struct vdpu_dev *dec = to_vdpu_dev(mpp);
597 
598     mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
599 
600     return 0;
601 }
602 
/*
 * Hard-IRQ half (mpp_dev_ops.irq): latch the interrupt status and check
 * the raw decode-interrupt bit. Returns IRQ_NONE if this device did not
 * raise the (shared) interrupt, otherwise acknowledges it and defers to
 * the threaded handler.
 */
static int vdpu_irq(struct mpp_dev *mpp)
{
    mpp->irq_status = mpp_read(mpp, VDPU1_REG_DEC_INT_EN);
    if (!(mpp->irq_status & VDPU1_DEC_INT_RAW)) {
        return IRQ_NONE;
    }

    /* clear/disable the interrupt register to acknowledge */
    mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);
    /* set clock gating to save power */
    mpp_write(mpp, VDPU1_REG_DEC_EN, VDPU1_CLOCK_GATE_EN);

    return IRQ_WAKE_THREAD;
}
616 
/*
 * Threaded-IRQ half (mpp_dev_ops.isr): hand the latched status to the
 * current task, request a reset on error bits, and finish the task.
 */
static int vdpu_isr(struct mpp_dev *mpp)
{
    u32 err_mask;
    struct vdpu_task *task = NULL;
    struct mpp_task *mpp_task = mpp->cur_task;
    /* NOTE(review): cur_task is read/cleared without a lock here —
     * presumably serialized by the mpp core; confirm before relying on it.
     */
    if (!mpp_task) {
        dev_err(mpp->dev, "no current task\n");
        return IRQ_HANDLED;
    }
    mpp_time_diff(mpp_task);
    mpp->cur_task = NULL;
    task = to_vdpu_task(mpp_task);
    task->irq_status = mpp->irq_status;
    mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
    /* any of these status bits means the hardware needs a reset */
    err_mask =
        VDPU1_INT_TIMEOUT | VDPU1_INT_STRM_ERROR | VDPU1_INT_ASO_ERROR | VDPU1_INT_BUF_EMPTY | VDPU1_INT_BUS_ERROR;
    if (err_mask & task->irq_status) {
        atomic_inc(&mpp->reset_request);
    }
    mpp_task_finish(mpp_task->session, mpp_task);
    mpp_debug_leave();
    return IRQ_HANDLED;
}
641 
/*
 * mpp_hw_ops.reset hook: pulse the AXI/AHB soft resets with the power
 * domain idled around the sequence, then clear the interrupt register.
 * Skipped (except the register clear) when either reset is missing.
 */
static int vdpu_reset(struct mpp_dev *mpp)
{
    struct vdpu_dev *dec = to_vdpu_dev(mpp);

    if (dec->rst_a && dec->rst_h) {
        mpp_debug(DEBUG_RESET, "reset in\n");

        /* Don't skip this or iommu won't work after reset */
        rockchip_pmu_idle_request(mpp->dev, true);
        mpp_safe_reset(dec->rst_a);
        mpp_safe_reset(dec->rst_h);
        /* hold reset asserted for 5 us */
        udelay(0x5);
        mpp_safe_unreset(dec->rst_a);
        mpp_safe_unreset(dec->rst_h);
        rockchip_pmu_idle_request(mpp->dev, false);

        mpp_debug(DEBUG_RESET, "reset out\n");
    }
    mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);

    return 0;
}
664 
/* Baseline hardware ops: no per-task frequency policy. */
static struct mpp_hw_ops vdpu_v1_hw_ops = {
    .init = vdpu_init,
    .clk_on = vdpu_clk_on,
    .clk_off = vdpu_clk_off,
    .set_freq = vdpu_set_freq,
    .reduce_freq = vdpu_reduce_freq,
    .reset = vdpu_reset,
};

/* RK3288 variant: adds width-based clock mode selection. */
static struct mpp_hw_ops vdpu_3288_hw_ops = {
    .init = vdpu_init,
    .clk_on = vdpu_clk_on,
    .clk_off = vdpu_clk_off,
    .get_freq = vdpu_3288_get_freq,
    .set_freq = vdpu_set_freq,
    .reduce_freq = vdpu_reduce_freq,
    .reset = vdpu_reset,
};

/* RK3368 variant: adds width-based clock mode selection. */
static struct mpp_hw_ops vdpu_3368_hw_ops = {
    .init = vdpu_init,
    .clk_on = vdpu_clk_on,
    .clk_off = vdpu_clk_off,
    .get_freq = vdpu_3368_get_freq,
    .set_freq = vdpu_set_freq,
    .reduce_freq = vdpu_reduce_freq,
    .reset = vdpu_reset,
};

/* Task lifecycle callbacks shared by every VDPU1 variant. */
static struct mpp_dev_ops vdpu_v1_dev_ops = {
    .alloc_task = vdpu_alloc_task,
    .run = vdpu_run,
    .irq = vdpu_irq,
    .isr = vdpu_isr,
    .finish = vdpu_finish,
    .result = vdpu_result,
    .free_task = vdpu_free_task,
};

/* Device descriptors bound through the of_device_id match table. */
static const struct mpp_dev_var vdpu_v1_data = {
    .device_type = MPP_DEVICE_VDPU1,
    .hw_info = &vdpu_v1_hw_info,
    .trans_info = vdpu_v1_trans,
    .hw_ops = &vdpu_v1_hw_ops,
    .dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3288_data = {
    .device_type = MPP_DEVICE_VDPU1,
    .hw_info = &vdpu_v1_hw_info,
    .trans_info = vdpu_v1_trans,
    .hw_ops = &vdpu_3288_hw_ops,
    .dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3368_data = {
    .device_type = MPP_DEVICE_VDPU1,
    .hw_info = &vdpu_v1_hw_info,
    .trans_info = vdpu_v1_trans,
    .hw_ops = &vdpu_3368_hw_ops,
    .dev_ops = &vdpu_v1_dev_ops,
};

/* AVS+ decoder reuses the baseline ops with its own device type. */
static const struct mpp_dev_var avsd_plus_data = {
    .device_type = MPP_DEVICE_AVSPLUS_DEC,
    .hw_info = &vdpu_v1_hw_info,
    .trans_info = vdpu_v1_trans,
    .hw_ops = &vdpu_v1_hw_ops,
    .dev_ops = &vdpu_v1_dev_ops,
};
735 
/* DT compatibles; SoC-specific entries compile in only for their SoC. */
static const struct of_device_id mpp_vdpu1_dt_match[] = {
    {
        .compatible = "rockchip,vpu-decoder-v1",
        .data = &vdpu_v1_data,
    },
#ifdef CONFIG_CPU_RK3288
    {
        .compatible = "rockchip,vpu-decoder-rk3288",
        .data = &vdpu_3288_data,
    },
#endif
#ifdef CONFIG_CPU_RK3368
    {
        .compatible = "rockchip,vpu-decoder-rk3368",
        .data = &vdpu_3368_data,
    },
#endif
#ifdef CONFIG_CPU_RK3328
    {
        .compatible = "rockchip,avs-plus-decoder",
        .data = &avsd_plus_data,
    },
#endif
    {},
};
761 
/*
 * Platform probe: allocate the device context, match the DT variant,
 * run the common mpp probe, hook up the (shared) interrupt, register
 * the post-process sub-device alias and procfs, then register with the
 * mpp service.
 */
static int vdpu_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct vdpu_dev *dec = NULL;
    struct mpp_dev *mpp = NULL;
    const struct of_device_id *match = NULL;
    int ret = 0;

    dev_info(dev, "probe device\n");
    dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
    if (!dec) {
        return -ENOMEM;
    }
    platform_set_drvdata(pdev, dec);

    mpp = &dec->mpp;
    if (pdev->dev.of_node) {
        match = of_match_node(mpp_vdpu1_dt_match, pdev->dev.of_node);
        if (match) {
            mpp->var = (struct mpp_dev_var *)match->data;
        }
    }

    ret = mpp_dev_probe(mpp, pdev);
    if (ret) {
        dev_err(dev, "probe sub driver failed\n");
        return -EINVAL;
    }

    /* IRQF_SHARED: the VPU interrupt line may be shared with the encoder */
    ret = devm_request_threaded_irq(dev, mpp->irq, mpp_dev_irq, mpp_dev_isr_sched, IRQF_SHARED, dev_name(dev), mpp);
    if (ret) {
        dev_err(dev, "register interrupter runtime failed\n");
        return -EINVAL;
    }

    /* the plain decoder also serves VDPU1_PP sessions through this device */
    if (mpp->var->device_type == MPP_DEVICE_VDPU1) {
        mpp->srv->sub_devices[MPP_DEVICE_VDPU1_PP] = mpp;
        set_bit(MPP_DEVICE_VDPU1_PP, &mpp->srv->hw_support);
    }

    mpp->session_max_buffers = VDPU1_SESSION_MAX_BUFFERS;
    vdpu_procfs_init(mpp);
    /* register current device to mpp service */
    mpp_dev_register_srv(mpp, mpp->srv);
    dev_info(dev, "probing finish\n");

    return 0;
}
810 
vdpu_remove(struct platform_device * pdev)811 static int vdpu_remove(struct platform_device *pdev)
812 {
813     struct device *dev = &pdev->dev;
814     struct vdpu_dev *dec = platform_get_drvdata(pdev);
815 
816     dev_info(dev, "remove device\n");
817     mpp_dev_remove(&dec->mpp);
818     vdpu_procfs_remove(&dec->mpp);
819 
820     return 0;
821 }
822 
/*
 * Platform shutdown: flag the service so no new tasks start, then poll
 * until the running task count drains or the timeout expires.
 */
static void vdpu_shutdown(struct platform_device *pdev)
{
    int ret;
    int val;
    struct device *dev = &pdev->dev;
    struct vdpu_dev *dec = platform_get_drvdata(pdev);
    struct mpp_dev *mpp = &dec->mpp;

    dev_info(dev, "shutdown device\n");

    atomic_inc(&mpp->srv->shutdown_request);
    /* poll every 0x4E20 us (20 ms), give up after 0x30D40 us (200 ms) */
    ret = readx_poll_timeout(atomic_read, &mpp->task_count, val, val == 0, 0x4E20, 0x30D40);
    if (ret == -ETIMEDOUT) {
        dev_err(dev, "wait total running time out\n");
    }
}
839 
/* Exported so the mpp service module can register this driver. */
struct platform_driver rockchip_vdpu1_driver = {
    .probe = vdpu_probe,
    .remove = vdpu_remove,
    .shutdown = vdpu_shutdown,
    .driver =
        {
            .name = VDPU1_DRIVER_NAME,
            .of_match_table = of_match_ptr(mpp_vdpu1_dt_match),
        },
};
EXPORT_SYMBOL(rockchip_vdpu1_driver);
851