// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * Authors:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#define VDPU2_DRIVER_NAME		"mpp_vdpu2"

#define VDPU2_SESSION_MAX_BUFFERS	40
/* The maximum number of registers across all hardware versions */
#define VDPU2_REG_NUM			159
#define VDPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VDPU2_REG_START_INDEX		50
#define VDPU2_REG_END_INDEX		158

#define VDPU2_REG_SYS_CTRL		0x0d4
#define VDPU2_REG_SYS_CTRL_INDEX	(53)
#define VDPU2_GET_FORMAT(x)		((x) & 0xf)
#define VDPU2_FMT_H264D			0
#define VDPU2_FMT_MPEG4D		1
#define VDPU2_FMT_H263D			2
#define VDPU2_FMT_JPEGD			3
#define VDPU2_FMT_VC1D			4
#define VDPU2_FMT_MPEG2D		5
#define VDPU2_FMT_MPEG1D		6
#define VDPU2_FMT_VP6D			7
#define VDPU2_FMT_RESERVED		8
#define VDPU2_FMT_VP7D			9
#define VDPU2_FMT_VP8D			10
#define VDPU2_FMT_AVSD			11

#define VDPU2_REG_DEC_INT		0x0dc
#define VDPU2_REG_DEC_INT_INDEX		(55)
#define VDPU2_INT_TIMEOUT		BIT(13)
#define VDPU2_INT_STRM_ERROR		BIT(12)
#define VDPU2_INT_SLICE			BIT(9)
#define VDPU2_INT_ASO_ERROR		BIT(8)
#define VDPU2_INT_BUF_EMPTY		BIT(6)
#define VDPU2_INT_BUS_ERROR		BIT(5)
#define VDPU2_DEC_INT			BIT(4)
#define VDPU2_DEC_IRQ_DIS		BIT(1)
#define VDPU2_DEC_INT_RAW		BIT(0)

#define VDPU2_REG_DEC_EN		0x0e4
#define VDPU2_REG_DEC_EN_INDEX		(57)
#define VDPU2_DEC_CLOCK_GATE_EN		BIT(4)
#define VDPU2_DEC_START			BIT(0)

#define VDPU2_REG_DIR_MV_BASE		0x0f8
#define VDPU2_REG_DIR_MV_BASE_INDEX	(62)

#define VDPU2_REG_STREAM_RLC_BASE	0x100
#define VDPU2_REG_STREAM_RLC_BASE_INDEX	(64)

#define VDPU2_REG_CLR_CACHE_BASE	0x810

#define to_vdpu_task(task)	\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)	\
		container_of(dev, struct vdpu_dev, mpp)

struct vdpu_task {
	struct mpp_task mpp_task;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VDPU2_REG_NUM];

	struct reg_offset_info off_inf;
	u32 strm_addr;
	u32 irq_status;
	/* requests recorded for the current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct vdpu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};

static struct mpp_hw_info vdpu_v2_hw_info = {
	.reg_num = VDPU2_REG_NUM,
	.reg_id = VDPU2_REG_HW_ID_INDEX,
	.reg_start = VDPU2_REG_START_INDEX,
	.reg_end = VDPU2_REG_END_INDEX,
	.reg_en = VDPU2_REG_DEC_EN_INDEX,
};

/*
 * File-handle translation information: for each decoding format, the list
 * of register indices that carry a dma-buf fd to be translated into a
 * device address.
 */
static const u16 trans_tbl_default[] = {
	61, 62, 63, 64, 131, 134, 135, 148
};

static const u16 trans_tbl_jpegd[] = {
	21, 22, 61, 63, 64, 131
};

static const u16 trans_tbl_h264d[] = {
	61, 63, 64, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
	98, 99
};

static const u16 trans_tbl_vc1d[] = {
	62, 63, 64, 131, 134, 135, 145, 148
};

static const u16 trans_tbl_vp6d[] = {
	61, 63, 64, 131, 136, 145
};

static const u16 trans_tbl_vp8d[] = {
	61, 63, 64, 131, 136, 137, 140, 141, 142, 143, 144, 145, 146, 147, 149
};

static struct mpp_trans_info vdpu_v2_trans[] = {
	[VDPU2_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU2_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU2_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU2_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU2_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU2_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};

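/*
 * Translate the dma-buf fds carried in the task registers into device
 * addresses. For H.264 the direct MV base register packs an fd in its low
 * 10 bits and a 16-byte-aligned offset in the remaining bits, which is
 * resolved here as well.
 */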
static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU2_GET_FORMAT(task->reg[VDPU2_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	if (likely(fmt == VDPU2_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		struct mpp_mem_region *mem_region = NULL;
		int idx = VDPU2_REG_DIR_MV_BASE_INDEX;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region))
			return -EFAULT;

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

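/*
 * Walk the user messages of one task: copy register-write payloads into the
 * task register image, record the write and read requests, and collect any
 * register address offset information sent along.
 */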
static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

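/*
 * Allocate and prepare one decoder task: extract the register requests from
 * the message, translate fds into device addresses unless the session asked
 * not to, and save the stream RLC base for the length calculation in finish.
 */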
static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->strm_addr = task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

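/*
 * Program the hardware for one task: clear the cache, write out the recorded
 * register values, then set the start bit in the enable register.
 */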
static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU2_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
	/* init current task */
	mpp->cur_task = mpp_task;
	/* Flush the registers */
	wmb();
	mpp_write(mpp, VDPU2_REG_DEC_EN,
		  task->reg[reg_en] | VDPU2_DEC_START);

	mpp_debug_leave();

	return 0;
}

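/*
 * Read back the requested registers after the hardware has finished, restore
 * the saved interrupt status, and convert the stream RLC pointer into the
 * consumed stream length reported back to user space.
 */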
static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU2_REG_DEC_INT_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, VDPU2_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}

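/* Copy the requested result registers back to user space. */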
static int vdpu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

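/* Release the memory regions attached to a task and free it. */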
static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed to create procfs dir\n");
		dec->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
#else
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

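/*
 * One-time device init: look up the GRF info, get the clocks and resets from
 * the device tree and set a default aclk rate.
 */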
static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource defined\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource defined\n");

	return 0;
}

static int vdpu_clk_on(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_clk_off(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_safe_disable(dec->aclk_info.clk);
	mpp_clk_safe_disable(dec->hclk_info.clk);

	return 0;
}

static int vdpu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);

	return 0;
}

static int vdpu_reduce_freq(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

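/*
 * Hard IRQ handler: latch and clear the interrupt status, re-enable clock
 * gating, and defer the real work to the threaded handler.
 */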
static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU2_REG_DEC_INT);
	if (!(mpp->irq_status & VDPU2_DEC_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU2_REG_DEC_EN, VDPU2_DEC_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}

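/*
 * Threaded interrupt handler: hand the saved irq status over to the current
 * task, request a reset if any error bit is set, and complete the task.
 */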
static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VDPU2_INT_TIMEOUT
		   | VDPU2_INT_STRM_ERROR
		   | VDPU2_INT_ASO_ERROR
		   | VDPU2_INT_BUF_EMPTY
		   | VDPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

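/*
 * Reset the decoder: stop the core, then pulse the aclk/hclk resets while
 * the power domain is idle-requested so the IOMMU keeps working afterwards.
 */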
static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp_write(mpp, VDPU2_REG_DEC_EN, 0);
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	if (dec->rst_a && dec->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);
	}

	return 0;
}

static struct mpp_hw_ops vdpu_v2_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

static struct mpp_dev_ops vdpu_v2_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

static const struct mpp_dev_var vdpu_v2_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_v2_hw_ops,
	.dev_ops = &vdpu_v2_dev_ops,
};

static const struct of_device_id mpp_vdpu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v2",
		.data = &vdpu_v2_data,
	},
	{},
};

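/*
 * Probe: match the DT node, register with the common mpp framework, request
 * the (shared) interrupt and expose the post-processor sub-device.
 */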
static int vdpu_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	dev_info(dev, "probe device\n");
	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;
	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vdpu2_dt_match,
				      pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vdpu");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return -EINVAL;
	}

	if (mpp->var->device_type == MPP_DEVICE_VDPU2) {
		mpp->srv->sub_devices[MPP_DEVICE_VDPU2_PP] = mpp;
		set_bit(MPP_DEVICE_VDPU2_PP, &mpp->srv->hw_support);
	}

	mpp->session_max_buffers = VDPU2_SESSION_MAX_BUFFERS;
	vdpu_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probe finished\n");

	return 0;
}

static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	vdpu_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_vdpu2_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = VDPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu2_driver);