// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2020 Rockchip Electronics Co., Ltd
 *
 * Authors:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 */
#include <linux/pm_runtime.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#include "mpp_rkvdec2_link.h"

/*
 * hardware information
 */
static struct mpp_hw_info rkvdec_v2_hw_info = {
	.reg_num = RKVDEC_REG_NUM,
	.reg_id = RKVDEC_REG_HW_ID_INDEX,
	.reg_start = RKVDEC_REG_START_INDEX,
	.reg_end = RKVDEC_REG_END_INDEX,
	.reg_en = RKVDEC_REG_START_EN_INDEX,
};

/*
 * file handle translate information: per-format lists of the register
 * indices whose values carry buffer file handles to be translated
 */
static const u16 trans_tbl_h264d[] = {
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
	192, 193, 194, 195, 196, 197
};

static const u16 trans_tbl_h265d[] = {
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
	192, 193, 194, 195, 196, 197
};

static const u16 trans_tbl_vp9d[] = {
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
	160, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 180, 181, 182, 183,
	184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197
};

static const u16 trans_tbl_avs2d[] = {
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
	161, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
	177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
	192, 193, 194, 195, 196, 197
};

static struct mpp_trans_info rkvdec_v2_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
	[RKVDEC_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[RKVDEC_FMT_VP9D] = {
		.count = ARRAY_SIZE(trans_tbl_vp9d),
		.table = trans_tbl_vp9d,
	},
	[RKVDEC_FMT_AVS2] = {
		.count = ARRAY_SIZE(trans_tbl_avs2d),
		.table = trans_tbl_avs2d,
	},
};

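/*
 * Copy the userspace array of RCB elements into the session private
 * rkvdec2_rcb_info, rejecting any request larger than the fixed elem[]
 * storage.
 */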
static int mpp_extract_rcb_info(struct rkvdec2_rcb_info *rcb_inf,
				struct mpp_request *req)
{
	int max_size = ARRAY_SIZE(rcb_inf->elem);
	int cnt = req->size / sizeof(rcb_inf->elem[0]);

	if (req->size > sizeof(rcb_inf->elem)) {
		mpp_err("count %d, max_size %d\n", cnt, max_size);
		return -EINVAL;
	}
	if (copy_from_user(rcb_inf->elem, req->data, req->size)) {
		mpp_err("copy_from_user failed\n");
		return -EINVAL;
	}
	rcb_inf->cnt = cnt;

	return 0;
}

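/*
 * Walk the ioctl message array of one task: stage register writes into
 * task->reg (bounds-checked against the hardware register window), queue
 * register read-back requests, and record address-offset and RCB info
 * for later stages.
 */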
static int rkvdec2_extract_task_msg(struct mpp_session *session,
				    struct rkvdec2_task *task,
				    struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg), off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
				req_base = RKVDEC_PERF_SEL_OFFSET;
				max_size = sizeof(task->reg_sel);
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
			}

			ret = mpp_check_req(req, req_base, max_size, 0, max_size);
			if (ret)
				continue;

			memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		case MPP_CMD_SET_RCB_INFO: {
			struct rkvdec2_session_priv *priv = session->priv;

			if (priv)
				mpp_extract_rcb_info(&priv->rcb_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

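/*
 * Patch the RCB registers of a task to point into the device's dedicated
 * SRAM/DMA backing buffer. Entries that would overflow dec->rcb_size keep
 * the buffer originally supplied by userspace, and frames narrower than
 * rcb_min_width skip the rewrite entirely.
 */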
int mpp_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session,
		   struct mpp_task *task)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec2_session_priv *priv = session->priv;

	mpp_debug_enter();

	if (priv && dec->rcb_iova) {
		int i;
		u32 reg_idx, rcb_size, rcb_offset;
		struct rkvdec2_rcb_info *rcb_inf = &priv->rcb_inf;
		u32 width = priv->codec_info[DEC_INFO_WIDTH].val;

		if (width < dec->rcb_min_width)
			goto done;

		rcb_offset = 0;
		for (i = 0; i < rcb_inf->cnt; i++) {
			reg_idx = rcb_inf->elem[i].index;
			rcb_size = rcb_inf->elem[i].size;
			if ((rcb_offset + rcb_size) > dec->rcb_size) {
				mpp_debug(DEBUG_SRAM_INFO,
					  "rcb: reg %d use original buffer\n", reg_idx);
				continue;
			}
			mpp_debug(DEBUG_SRAM_INFO, "rcb: reg %d offset %d, size %d\n",
				  reg_idx, rcb_offset, rcb_size);
			task->reg[reg_idx] = dec->rcb_iova + rcb_offset;
			rcb_offset += rcb_size;
		}
	}
done:
	mpp_debug_leave();

	return 0;
}

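/*
 * Build a decoder task from the ioctl messages: pull in the register
 * payload, translate buffer fds in the registers to device addresses
 * (unless MPP_FLAGS_REG_FD_NO_TRANS is set) and cache the resolution
 * info later used for clock scaling.
 */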
int rkvdec2_task_init(struct mpp_dev *mpp, struct mpp_session *session,
		      struct rkvdec2_task *task, struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = &task->mpp_task;

	mpp_debug_enter();

	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvdec2_extract_task_msg(session, task, msgs);
	if (ret)
		return ret;

	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_FORMAT_INDEX]);

		ret = mpp_translate_reg_address(session, mpp_task,
						fmt, task->reg, &task->off_inf);
		if (ret)
			goto fail;

		mpp_translate_reg_offset_info(mpp_task, &task->off_inf, task->reg);
	}

	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;
	task->slot_idx = -1;
	init_waitqueue_head(&task->wait);
	/* get resolution info */
	if (session->priv) {
		struct rkvdec2_session_priv *priv = session->priv;
		u32 width = priv->codec_info[DEC_INFO_WIDTH].val;
		u32 bitdepth = priv->codec_info[DEC_INFO_BITDEPTH].val;

		task->width = (bitdepth > 8) ? ((width * bitdepth + 7) >> 3) : width;
		task->height = priv->codec_info[DEC_INFO_HEIGHT].val;
		task->pixels = task->width * task->height;
		mpp_debug(DEBUG_TASK_INFO, "width=%d, bitdepth=%d, height=%d\n",
			  width, bitdepth, task->height);
	}

	mpp_debug_leave();

	return 0;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	return ret;
}

void *rkvdec2_alloc_task(struct mpp_session *session,
			 struct mpp_task_msgs *msgs)
{
	int ret;
	struct rkvdec2_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
	if (ret) {
		kfree(task);
		return NULL;
	}
	mpp_set_rcbbuf(session->mpp, session, &task->mpp_task);

	return &task->mpp_task;
}

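/*
 * Program one task into the hardware: configure and clear the three read
 * caches, replay the staged write requests into the register file (the
 * enable index is handed to mpp_write_req(), presumably so the start bit
 * is held back), flush the IOMMU TLB, then write the start register after
 * a write barrier.
 */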
static int rkvdec2_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	u32 reg_en = mpp_task->hw_info->reg_en;
	/* set cache size */
	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
	int i;

	mpp_debug_enter();

	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
	/* clear cache */
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);

	/* set registers for hardware */
	for (i = 0; i < task->w_req_cnt; i++) {
		int s, e;
		struct mpp_request *req = &task->w_reqs[i];

		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;
	/* flush the registers before starting the device */
	wmb();
	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);

	mpp_debug_leave();

	return 0;
}

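/*
 * Hard IRQ half: latch the interrupt status, return IRQ_NONE if this
 * decoder did not raise the interrupt, otherwise clear the interrupt
 * enable register and defer to the threaded handler below.
 */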
static int rkvdec2_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
	if (!(mpp->irq_status & RKVDEC_IRQ_RAW))
		return IRQ_NONE;

	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);

	return IRQ_WAKE_THREAD;
}

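/*
 * Threaded IRQ half: hand the latched status to the current task, request
 * a device reset on any error status bit, then complete the task.
 */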
static int rkvdec2_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct rkvdec2_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvdec2_task(mpp_task);
	task->irq_status = mpp->irq_status;

	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
	err_mask = RKVDEC_COLMV_REF_ERR_STA | RKVDEC_BUF_EMPTY_STA |
		   RKVDEC_TIMEOUT_STA | RKVDEC_ERROR_STA;
	if (err_mask & task->irq_status) {
		atomic_inc(&mpp->reset_request);
		mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n", task->irq_status);
		mpp_task_dump_hw_reg(mpp);
	}

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();
	return IRQ_HANDLED;
}

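/*
 * The performance counters are multiplexed: one select register picks up
 * to three counter ids and three value registers return their counts, so
 * the requested range is read back three counters per iteration. A select
 * id of 0 marks an unused slot in a trailing partial group.
 */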
static int rkvdec2_read_perf_sel(struct mpp_dev *mpp, u32 *regs, u32 s, u32 e)
{
	u32 i;
	u32 sel0, sel1, sel2, val;

	for (i = s; i < e; i += 3) {
		/* set sel */
		sel0 = i;
		sel1 = ((i + 1) < e) ? (i + 1) : 0;
		sel2 = ((i + 2) < e) ? (i + 2) : 0;
		val = RKVDEC_SET_PERF_SEL(sel0, sel1, sel2);
		writel_relaxed(val, mpp->reg_base + RKVDEC_PERF_SEL_BASE);
		/* read data */
		regs[sel0] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL0_BASE);
		mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel0, regs[sel0]);
		if (sel1) {
			regs[sel1] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL1_BASE);
			mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel1, regs[sel1]);
		}
		if (sel2) {
			regs[sel2] = readl_relaxed(mpp->reg_base + RKVDEC_SEL_VAL2_BASE);
			mpp_debug(DEBUG_GET_PERF_VAL, "sel[%d]:%u\n", sel2, regs[sel2]);
		}
	}

	return 0;
}

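/*
 * Post-run readback: serve the queued read requests (plain registers or
 * the multiplexed perf counters), then revert the two in-flight hacks
 * noted below: the latched irq status is written back into the register
 * image and the RLC base register is replaced by the consumed stream
 * length.
 */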
static int rkvdec2_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	u32 i;
	u32 dec_get;
	s32 dec_length;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	struct mpp_request *req;
	u32 s, e;

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		/* read perf register */
		if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
			int off = req->offset - RKVDEC_PERF_SEL_OFFSET;

			s = off / sizeof(u32);
			e = s + req->size / sizeof(u32);
			rkvdec2_read_perf_sel(mpp, task->reg_sel, s, e);
		} else {
			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_read_req(mpp, task->reg, s, e);
		}
	}
	/* revert hack for irq status */
	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER, "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}

int rkvdec2_result(struct mpp_dev *mpp, struct mpp_task *mpp_task,
		   struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (req->offset >= RKVDEC_PERF_SEL_OFFSET) {
			int off = req->offset - RKVDEC_PERF_SEL_OFFSET;

			if (copy_to_user(req->data,
					 (u8 *)task->reg_sel + off,
					 req->size)) {
				mpp_err("copy_to_user perf_sel fail\n");
				return -EIO;
			}
		} else {
			if (copy_to_user(req->data,
					 (u8 *)task->reg + req->offset,
					 req->size)) {
				mpp_err("copy_to_user reg fail\n");
				return -EIO;
			}
		}
	}

	return 0;
}

int rkvdec2_free_task(struct mpp_session *session, struct mpp_task *mpp_task)
{
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

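/*
 * Session ioctl handler. MPP_CMD_SEND_CODEC_INFO copies an array of
 * codec_info_elem from userspace into the per-session codec_info table,
 * validating the type/flag ranges and clamping the index against
 * speculative out-of-bounds access.
 */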
static int rkvdec2_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct rkvdec2_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > DEC_INFO_BUTT) ? DEC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > DEC_INFO_BASE && elem.type < DEC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, DEC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

int rkvdec2_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int rkvdec2_init_session(struct mpp_session *session)
{
	struct rkvdec2_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvdec2_procfs_remove(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int rkvdec2_show_pref_sel_offset(struct seq_file *file, void *v)
{
	seq_printf(file, "0x%08x\n", RKVDEC_PERF_SEL_OFFSET);

	return 0;
}

static int rkvdec2_procfs_init(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	char name[32];

	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
	    !mpp->srv || !mpp->srv->procfs)
		return -EINVAL;

	snprintf(name, sizeof(name) - 1, "%s%d",
		 mpp->dev->of_node->name, mpp->core_id);
	dec->procfs = proc_mkdir(name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed to create procfs dir\n");
		dec->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_core", 0644,
			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_cabac", 0644,
			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);
	proc_create_single("perf_sel_offset", 0444,
			   dec->procfs, rkvdec2_show_pref_sel_offset);
	mpp_procfs_create_u32("task_count", 0644,
			      dec->procfs, &mpp->task_index);
	mpp_procfs_create_u32("disable_work", 0644,
			      dec->procfs, &dec->disable_work);

	return 0;
}
#else
static inline int rkvdec2_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvdec2_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

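/*
 * One-time device setup: look up the GRF info and the five decoder clocks
 * from the device tree, set the default clock rates, and grab the reset
 * controls used by rkvdec2_reset(). Missing clocks or resets are logged
 * but are not fatal.
 */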
static int rkvdec2_init(struct mpp_dev *mpp)
{
	int ret;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	mutex_init(&dec->sip_reset_lock);
	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_cabac\n");
	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_hevc_cabac\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &dec->default_max_load);
	/* Get reset controls from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource defined\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource defined\n");
	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
	if (!dec->rst_niu_a)
		mpp_err("No niu aclk reset resource defined\n");
	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
	if (!dec->rst_niu_h)
		mpp_err("No niu hclk reset resource defined\n");
	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!dec->rst_core)
		mpp_err("No core reset resource defined\n");
	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
	if (!dec->rst_cabac)
		mpp_err("No cabac reset resource defined\n");
	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
	if (!dec->rst_hevc_cabac)
		mpp_err("No hevc cabac reset resource defined\n");

	return 0;
}

static int rkvdec2_clk_on(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);
	mpp_clk_safe_enable(dec->core_clk_info.clk);
	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);

	return 0;
}

static int rkvdec2_clk_off(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	clk_disable_unprepare(dec->aclk_info.clk);
	clk_disable_unprepare(dec->hclk_info.clk);
	clk_disable_unprepare(dec->core_clk_info.clk);
	clk_disable_unprepare(dec->cabac_clk_info.clk);
	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);

	return 0;
}

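/*
 * Choose a clock mode for the task: sum the pixel workload of this task
 * and everything already pending on the queue, and switch to the advanced
 * clock mode once the total exceeds rockchip,default-max-load.
 */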
static int rkvdec2_get_freq(struct mpp_dev *mpp,
			    struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	/* without a max load setting there is no advanced mode */
	if (!dec->default_max_load || !task->pixels)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct rkvdec2_task *loop_task = to_rkvdec2_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > dec->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

static int rkvdec2_set_freq(struct mpp_dev *mpp,
			    struct mpp_task *mpp_task)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}

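/*
 * Full hardware reset: request PMU idle, assert every available reset
 * line (NIU first), hold them for a few microseconds, release them again
 * and drop the idle request. Only runs when both the aclk and hclk resets
 * were found at init time.
 */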
int rkvdec2_reset(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	mpp_debug_enter();
	if (dec->rst_a && dec->rst_h) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_niu_a);
		mpp_safe_reset(dec->rst_niu_h);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		mpp_safe_reset(dec->rst_core);
		mpp_safe_reset(dec->rst_cabac);
		mpp_safe_reset(dec->rst_hevc_cabac);
		udelay(5);
		mpp_safe_unreset(dec->rst_niu_h);
		mpp_safe_unreset(dec->rst_niu_a);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_safe_unreset(dec->rst_core);
		mpp_safe_unreset(dec->rst_cabac);
		mpp_safe_unreset(dec->rst_hevc_cabac);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_debug_leave();

	return 0;
}

static struct mpp_hw_ops rkvdec_v2_hw_ops = {
	.init = rkvdec2_init,
	.clk_on = rkvdec2_clk_on,
	.clk_off = rkvdec2_clk_off,
	.get_freq = rkvdec2_get_freq,
	.set_freq = rkvdec2_set_freq,
	.reset = rkvdec2_reset,
};

static struct mpp_dev_ops rkvdec_v2_dev_ops = {
	.alloc_task = rkvdec2_alloc_task,
	.run = rkvdec2_run,
	.irq = rkvdec2_irq,
	.isr = rkvdec2_isr,
	.finish = rkvdec2_finish,
	.result = rkvdec2_result,
	.free_task = rkvdec2_free_task,
	.ioctl = rkvdec2_control,
	.init_session = rkvdec2_init_session,
	.free_session = rkvdec2_free_session,
};

static const struct mpp_dev_var rkvdec_v2_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v2_hw_info,
	.trans_info = rkvdec_v2_trans,
	.hw_ops = &rkvdec_v2_hw_ops,
	.dev_ops = &rkvdec_v2_dev_ops,
};

static const struct of_device_id mpp_rkvdec2_dt_match[] = {
	{
		.compatible = "rockchip,rkv-decoder-v2",
		.data = &rkvdec_v2_data,
	},
#ifdef CONFIG_CPU_RK3588
	{
		.compatible = "rockchip,rkv-decoder-v2-ccu",
	},
#endif
	{},
};

static int rkvdec2_ccu_remove(struct device *dev)
{
	device_init_wakeup(dev, false);
	pm_runtime_disable(dev);

	return 0;
}

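/*
 * Probe path for the "ccu" node, which appears to be the control unit
 * shared by the multi-core RK3588 decoder: map its register window,
 * enable wakeup and runtime PM, and pick up the ccu clock and reset.
 * Decoder cores attach to this device later via rkvdec2_attach_ccu().
 */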
static int rkvdec2_ccu_probe(struct platform_device *pdev)
{
	struct rkvdec2_ccu *ccu;
	struct resource *res;
	struct device *dev = &pdev->dev;

	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	ccu->dev = dev;
	atomic_set(&ccu->power_enabled, 0);
	platform_set_drvdata(pdev, ccu);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccu");
	if (!res) {
		dev_err(dev, "no memory resource defined\n");
		return -ENODEV;
	}

	ccu->reg_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!ccu->reg_base) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		return -ENODEV;
	}

	device_init_wakeup(dev, true);
	pm_runtime_enable(dev);

	ccu->aclk_info.clk = devm_clk_get(dev, "aclk_ccu");
	if (IS_ERR(ccu->aclk_info.clk)) {
		/* devm_clk_get() returns ERR_PTR, not NULL, on failure */
		ccu->aclk_info.clk = NULL;
		mpp_err("failed to get ccu aclk\n");
	}

	ccu->rst_a = devm_reset_control_get(dev, "video_ccu");
	if (IS_ERR(ccu->rst_a)) {
		ccu->rst_a = NULL;
		mpp_err("failed to get ccu reset\n");
	} else {
		mpp_safe_unreset(ccu->rst_a);
	}

	return 0;
}

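/*
 * Carve out the RCB buffer described by rockchip,rcb-iova: reserve the
 * iova range, map as much of it as possible onto the on-chip SRAM named
 * by rockchip,sram, and back the remainder (if any) with ordinary pages.
 * The resulting iova is handed out per task by mpp_set_rcbbuf().
 */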
static int rkvdec2_alloc_rcbbuf(struct platform_device *pdev, struct rkvdec2_dev *dec)
{
	int ret;
	u32 vals[2];
	dma_addr_t iova;
	u32 rcb_size, sram_size;
	struct device_node *sram_np;
	struct resource sram_res;
	resource_size_t sram_start, sram_end;
	struct iommu_domain *domain;
	struct device *dev = &pdev->dev;

	/* get rcb iova start and size */
	ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2);
	if (ret) {
		dev_err(dev, "could not find property rcb-iova\n");
		return ret;
	}
	iova = PAGE_ALIGN(vals[0]);
	rcb_size = PAGE_ALIGN(vals[1]);
	if (!rcb_size) {
		dev_err(dev, "rcb_size invalid\n");
		return -EINVAL;
	}
	/* reserve the iova range used for rcb */
	ret = iommu_dma_reserve_iova(dev, iova, rcb_size);
	if (ret) {
		dev_err(dev, "alloc rcb iova error\n");
		return ret;
	}
	/* get sram device node */
	sram_np = of_parse_phandle(dev->of_node, "rockchip,sram", 0);
	if (!sram_np) {
		dev_err(dev, "could not find phandle sram\n");
		return -ENODEV;
	}
	/* get sram start and size */
	ret = of_address_to_resource(sram_np, 0, &sram_res);
	of_node_put(sram_np);
	if (ret) {
		dev_err(dev, "find sram res error\n");
		return ret;
	}
	/* check that sram start and size are PAGE_SIZE aligned */
	sram_start = round_up(sram_res.start, PAGE_SIZE);
	sram_end = round_down(sram_res.start + resource_size(&sram_res), PAGE_SIZE);
	if (sram_end <= sram_start) {
		dev_err(dev, "no available sram, phy_start %pa, phy_end %pa\n",
			&sram_start, &sram_end);
		return -ENOMEM;
	}
	sram_size = sram_end - sram_start;
	sram_size = rcb_size < sram_size ? rcb_size : sram_size;
	/* map the iova onto the sram */
	domain = dec->mpp.iommu_info->domain;
	ret = iommu_map(domain, iova, sram_start, sram_size, IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		dev_err(dev, "sram iommu_map error\n");
		return ret;
	}
	/* alloc dma pages for the remaining buffer, sram + dma */
	if (sram_size < rcb_size) {
		struct page *page;
		size_t page_size = PAGE_ALIGN(rcb_size - sram_size);

		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(page_size));
		if (!page) {
			dev_err(dev, "unable to allocate pages\n");
			ret = -ENOMEM;
			goto err_sram_map;
		}
		/* map the tail of the iova range onto the pages */
		ret = iommu_map(domain, iova + sram_size, page_to_phys(page),
				page_size, IOMMU_READ | IOMMU_WRITE);
		if (ret) {
			dev_err(dev, "page iommu_map error\n");
			__free_pages(page, get_order(page_size));
			goto err_sram_map;
		}
		dec->rcb_page = page;
	}
	dec->sram_size = sram_size;
	dec->rcb_size = rcb_size;
	dec->rcb_iova = iova;
	dev_info(dev, "sram_start %pa\n", &sram_start);
	dev_info(dev, "rcb_iova %pad\n", &dec->rcb_iova);
	dev_info(dev, "sram_size %u\n", dec->sram_size);
	dev_info(dev, "rcb_size %u\n", dec->rcb_size);

	ret = of_property_read_u32(dev->of_node, "rockchip,rcb-min-width", &dec->rcb_min_width);
	if (!ret && dec->rcb_min_width)
		dev_info(dev, "min_width %u\n", dec->rcb_min_width);

	return 0;

err_sram_map:
	iommu_unmap(domain, iova, sram_size);

	return ret;
}

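/*
 * Probe path for a "core" node of the multi-core setup: run the common
 * mpp probe, attach the core to the ccu, then install the ccu-aware task
 * ops and irq handler. Only core 0 registers with the mpp service.
 */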
static int rkvdec2_core_probe(struct platform_device *pdev)
{
	int ret;
	struct rkvdec2_dev *dec;
	struct mpp_dev *mpp;
	struct device *dev = &pdev->dev;

	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;

	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (dev->of_node) {
		struct device_node *np = dev->of_node;
		const struct of_device_id *match;

		match = of_match_node(mpp_rkvdec2_dt_match, np);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
		mpp->core_id = of_alias_get_id(np, "rkvdec");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return ret;
	}
	/* attach core to ccu */
	ret = rkvdec2_attach_ccu(dev, dec);
	if (ret) {
		dev_err(dev, "attach ccu failed\n");
		return ret;
	}

	/* alloc rcb buffer */
	rkvdec2_alloc_rcbbuf(pdev, dec);

	/* set device for link mode */
	rkvdec2_ccu_link_init(pdev, dec);

	mpp->dev_ops->alloc_task = rkvdec2_ccu_alloc_task;
	mpp->dev_ops->task_worker = rkvdec2_soft_ccu_worker;
	kthread_init_work(&mpp->work, rkvdec2_soft_ccu_worker);

	mpp->iommu_info->hdl = rkvdec2_ccu_iommu_fault_handle;
	/* request irq */
	ret = devm_request_threaded_irq(dev, mpp->irq, rkvdec2_soft_ccu_irq, NULL,
					IRQF_SHARED, dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return -EINVAL;
	}
	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
	rkvdec2_procfs_init(mpp);

	/* if this is the main core, register it with the mpp service */
	if (mpp->core_id == 0)
		mpp_dev_register_srv(mpp, mpp->srv);

	return ret;
}

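/*
 * Probe path for a stand-alone decoder node: after the common mpp probe,
 * set up the RCB buffer and try hardware link mode; install either the
 * link ops and irq handler or the generic mpp irq pair, depending on
 * whether link mode came up.
 */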
static int rkvdec2_probe_default(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rkvdec2_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;

	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_rkvdec2_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return ret;
	}

	rkvdec2_alloc_rcbbuf(pdev, dec);
	rkvdec2_link_init(pdev, dec);

	if (dec->link_dec) {
		ret = devm_request_threaded_irq(dev, mpp->irq,
						rkvdec2_link_irq_proc, NULL,
						IRQF_SHARED, dev_name(dev), mpp);
		mpp->dev_ops->process_task = rkvdec2_link_process_task;
		mpp->dev_ops->wait_result = rkvdec2_link_wait_result;
		mpp->dev_ops->task_worker = rkvdec2_link_worker;
		mpp->dev_ops->deinit = rkvdec2_link_session_deinit;
		kthread_init_work(&mpp->work, rkvdec2_link_worker);
	} else {
		ret = devm_request_threaded_irq(dev, mpp->irq,
						mpp_dev_irq, mpp_dev_isr_sched,
						IRQF_SHARED, dev_name(dev), mpp);
	}
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
	rkvdec2_procfs_init(mpp);
	rkvdec2_link_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);

	return ret;
}

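/*
 * Top-level probe: dispatch on the device-tree node name so that one
 * driver covers the ccu node, the per-core nodes and the plain
 * single-core decoder.
 */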
static int rkvdec2_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	dev_info(dev, "%s, probing start\n", np->name);

	if (strstr(np->name, "ccu"))
		ret = rkvdec2_ccu_probe(pdev);
	else if (strstr(np->name, "core"))
		ret = rkvdec2_core_probe(pdev);
	else
		ret = rkvdec2_probe_default(pdev);

	dev_info(dev, "probing finish\n");

	return ret;
}

static int rkvdec2_free_rcbbuf(struct platform_device *pdev, struct rkvdec2_dev *dec)
{
	struct iommu_domain *domain;

	if (dec->rcb_page) {
		size_t page_size = PAGE_ALIGN(dec->rcb_size - dec->sram_size);

		__free_pages(dec->rcb_page, get_order(page_size));
	}
	if (dec->rcb_iova) {
		domain = dec->mpp.iommu_info->domain;
		iommu_unmap(domain, dec->rcb_iova, dec->rcb_size);
	}

	return 0;
}

static int rkvdec2_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (strstr(dev_name(dev), "ccu")) {
		dev_info(dev, "remove ccu device\n");
		rkvdec2_ccu_remove(dev);
	} else {
		struct mpp_dev *mpp = dev_get_drvdata(dev);
		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

		dev_info(dev, "remove device\n");

		rkvdec2_free_rcbbuf(pdev, dec);
		mpp_dev_remove(mpp);
		rkvdec2_procfs_remove(mpp);
		rkvdec2_link_remove(mpp, dec->link_dec);
	}

	return 0;
}

static void rkvdec2_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (!strstr(dev_name(dev), "ccu"))
		mpp_dev_shutdown(pdev);
}

struct platform_driver rockchip_rkvdec2_driver = {
	.probe = rkvdec2_probe,
	.remove = rkvdec2_remove,
	.shutdown = rkvdec2_shutdown,
	.driver = {
		.name = RKVDEC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvdec2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_rkvdec2_driver);