• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Randy Li, randy.li@rock-chips.com
8  *	Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <asm/cacheflush.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/iopoll.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/of_platform.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/regmap.h>
22 #include <linux/proc_fs.h>
23 #include <soc/rockchip/pm_domains.h>
24 
25 #include "mpp_debug.h"
26 #include "mpp_common.h"
27 #include "mpp_iommu.h"
28 #include "hack/mpp_hack_px30.h"
29 
#define VDPU2_DRIVER_NAME		"mpp_vdpu2"

#define	VDPU2_SESSION_MAX_BUFFERS	40
/* The maximum registers number of all the version */
#define VDPU2_REG_NUM			159
#define VDPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VDPU2_REG_START_INDEX		50
#define VDPU2_REG_END_INDEX		158

/* System control register; low nibble selects the decoding format */
#define VDPU2_REG_SYS_CTRL			0x0d4
#define VDPU2_REG_SYS_CTRL_INDEX		(53)
#define VDPU2_GET_FORMAT(x)			((x) & 0xf)
/* Format codes written by userspace into SYS_CTRL, index vdpu_v2_trans[] */
#define VDPU2_FMT_H264D				0
#define VDPU2_FMT_MPEG4D			1
#define VDPU2_FMT_H263D				2
#define VDPU2_FMT_JPEGD				3
#define VDPU2_FMT_VC1D				4
#define VDPU2_FMT_MPEG2D			5
#define VDPU2_FMT_MPEG1D			6
#define VDPU2_FMT_VP6D				7
#define VDPU2_FMT_RESERVED			8
#define VDPU2_FMT_VP7D				9
#define VDPU2_FMT_VP8D				10
#define VDPU2_FMT_AVSD				11

/* Interrupt status/control register and its status bits */
#define VDPU2_REG_DEC_INT			0x0dc
#define VDPU2_REG_DEC_INT_INDEX			(55)
#define VDPU2_INT_TIMEOUT			BIT(13)
#define VDPU2_INT_STRM_ERROR			BIT(12)
#define VDPU2_INT_SLICE				BIT(9)
#define VDPU2_INT_ASO_ERROR			BIT(8)
#define VDPU2_INT_BUF_EMPTY			BIT(6)
#define VDPU2_INT_BUS_ERROR			BIT(5)
#define	VDPU2_DEC_INT				BIT(4)
#define VDPU2_DEC_IRQ_DIS			BIT(1)
#define VDPU2_DEC_INT_RAW			BIT(0)

/* Decoder enable/start register */
#define VDPU2_REG_DEC_EN			0x0e4
#define VDPU2_REG_DEC_EN_INDEX			(57)
#define VDPU2_DEC_CLOCK_GATE_EN			BIT(4)
#define VDPU2_DEC_START				BIT(0)

/* H.264 direct MV buffer base address register */
#define VDPU2_REG_DIR_MV_BASE			0x0f8
#define VDPU2_REG_DIR_MV_BASE_INDEX		(62)

/* Input stream (RLC) buffer base address register */
#define VDPU2_REG_STREAM_RLC_BASE		0x100
#define VDPU2_REG_STREAM_RLC_BASE_INDEX		(64)

/* Writing 1 here clears the internal cache before a run */
#define VDPU2_REG_CLR_CACHE_BASE		0x810

/* Upcast the embedded common structs to the driver-private wrappers */
#define to_vdpu_task(task)		\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)		\
		container_of(dev, struct vdpu_dev, mpp)
84 
/* Per-job decode task state wrapping the common mpp_task */
struct vdpu_task {
	struct mpp_task mpp_task;

	/* clock mode requested for this task (normal/reduced/...) */
	enum MPP_CLOCK_MODE clk_mode;
	/* shadow copy of the hardware register file for this job */
	u32 reg[VDPU2_REG_NUM];

	/* extra register offset translation info from userspace */
	struct reg_offset_info off_inf;
	/* stream base iova captured at alloc time, used to compute
	 * the decoded length in vdpu_finish()
	 */
	u32 strm_addr;
	/* interrupt status latched by the isr for this task */
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

/* Per-device state wrapping the common mpp_dev */
struct vdpu_dev {
	struct mpp_dev mpp;

	/* axi and ahb clock handles with debug-rate overrides */
	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	/* optional reset lines, may be NULL when not described in DT */
	struct reset_control *rst_a;
	struct reset_control *rst_h;
};
112 
static struct mpp_hw_info vdpu_v2_hw_info = {
	.reg_num = VDPU2_REG_NUM,
	.reg_id = VDPU2_REG_HW_ID_INDEX,
	.reg_start = VDPU2_REG_START_INDEX,
	.reg_end = VDPU2_REG_END_INDEX,
	.reg_en = VDPU2_REG_DEC_EN_INDEX,
};

/*
 * file handle translate information
 *
 * Each table lists the register indices that carry dma-buf file
 * descriptors for the given format; those registers get rewritten
 * to device iova addresses before the hardware is started.
 */
static const u16 trans_tbl_default[] = {
	61, 62, 63, 64, 131, 134, 135, 148
};

static const u16 trans_tbl_jpegd[] = {
	21, 22, 61, 63, 64, 131
};

static const u16 trans_tbl_h264d[] = {
	61, 63, 64, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
	98, 99
};

static const u16 trans_tbl_vc1d[] = {
	62, 63, 64, 131, 134, 135, 145, 148
};

static const u16 trans_tbl_vp6d[] = {
	61, 63, 64, 131, 136, 145
};

static const u16 trans_tbl_vp8d[] = {
	61, 63, 64, 131, 136, 137, 140, 141, 142, 143, 144, 145, 146, 147, 149
};

/* Format code (VDPU2_FMT_*) -> fd translation table mapping */
static struct mpp_trans_info vdpu_v2_trans[] = {
	[VDPU2_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU2_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU2_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU2_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU2_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU2_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU2_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};
199 
/*
 * Translate dma-buf file descriptors stored in the task registers into
 * device iova addresses for the format selected in SYS_CTRL.
 *
 * For H.264, the direct MV base register packs fd and offset into one
 * word (unless the session set MPP_FLAGS_REG_NO_OFFSET), so it is
 * resolved by hand here in addition to the generic table-driven
 * translation. Returns 0 on success or a negative errno.
 */
static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU2_GET_FORMAT(task->reg[VDPU2_REG_SYS_CTRL_INDEX]);

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	if (likely(fmt == VDPU2_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		struct mpp_mem_region *mem_region = NULL;
		int idx = VDPU2_REG_DIR_MV_BASE_INDEX;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
			/* low 10 bits hold the fd; the rest is an
			 * offset in 16-byte units
			 */
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region))
			return -EFAULT;

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}
239 
/*
 * Split the userspace message array into per-task write and read
 * request lists.
 *
 * Write requests are range-checked and their payload copied into the
 * task register shadow; read requests are only recorded for later
 * readback in vdpu_finish()/vdpu_result(). Requests failing the range
 * check are silently skipped. Returns 0 or -EIO on copy failure.
 */
static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			/* valid window is [reg_start, reg_end] in bytes */
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}
293 
/*
 * Allocate and initialize a decode task from userspace messages.
 *
 * Extracts the register read/write requests, optionally translates fds
 * to iovas, and records the stream base address for decoded-length
 * computation at finish time. Returns the embedded mpp_task on success
 * or NULL on failure (the partially built task is dumped and freed).
 */
static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* remember stream base to derive consumed length in finish */
	task->strm_addr = task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
336 
/*
 * Program the hardware with the task's register shadow and kick off
 * decoding. The enable register (reg_en) is written last, separately,
 * with the START bit set, after a write barrier flushes the rest.
 */
static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU2_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
	/* init current task */
	mpp->cur_task = mpp_task;
	/* Flush the registers */
	wmb();
	mpp_write(mpp, VDPU2_REG_DEC_EN,
		  task->reg[reg_en] | VDPU2_DEC_START);

	mpp_debug_leave();

	return 0;
}
368 
/*
 * Read back the registers userspace asked for after the hardware is
 * done, then patch in the latched irq status and the decoded stream
 * length (current RLC pointer minus the recorded stream base, packed
 * back into the high bits of the RLC base register).
 */
static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU2_REG_DEC_INT_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, VDPU2_REG_STREAM_RLC_BASE);
	dec_length = dec_get - task->strm_addr;
	/* length goes in the upper bits, mirroring the fd/offset packing */
	task->reg[VDPU2_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}
401 
/*
 * Copy the register read results back to the userspace buffers
 * recorded in the task's read request list. Offsets/sizes were
 * range-checked by mpp_check_req() at extract time.
 */
static int vdpu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}
424 
/* Release a task: finalize shared state, then free the wrapper. */
static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_vdpu_task(mpp_task));

	return 0;
}
435 
436 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
/* Tear down this device's procfs directory, if it was created. */
static int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}
448 
/*
 * Create a procfs directory named after the DT node with writable
 * knobs for the aclk debug rate and session buffer limit.
 */
static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed on open procfs\n");
		dec->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
466 #else
/* No-op stubs when CONFIG_ROCKCHIP_MPP_PROC_FS is disabled */
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
476 #endif
477 
/*
 * One-time hardware setup: grab the GRF info, clocks and resets
 * described in the device tree, and program the default aclk rate.
 * Missing clocks/resets are logged but not fatal.
 */
static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");

	return 0;
}
505 
/*
 * PX30 variant of vdpu_init(): run the common init, then apply the
 * PX30 combo workaround. Fix: the return value of vdpu_init() was
 * previously discarded; propagate it so a failed common init is not
 * silently ignored.
 */
static int vdpu_px30_init(struct mpp_dev *mpp)
{
	int ret = vdpu_init(mpp);

	if (ret)
		return ret;

	return px30_workaround_combo_init(mpp);
}
511 
vdpu_clk_on(struct mpp_dev * mpp)512 static int vdpu_clk_on(struct mpp_dev *mpp)
513 {
514 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
515 
516 	mpp_clk_safe_enable(dec->aclk_info.clk);
517 	mpp_clk_safe_enable(dec->hclk_info.clk);
518 
519 	return 0;
520 }
521 
vdpu_clk_off(struct mpp_dev * mpp)522 static int vdpu_clk_off(struct mpp_dev *mpp)
523 {
524 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
525 
526 	mpp_clk_safe_disable(dec->aclk_info.clk);
527 	mpp_clk_safe_disable(dec->hclk_info.clk);
528 
529 	return 0;
530 }
531 
vdpu_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)532 static int vdpu_set_freq(struct mpp_dev *mpp,
533 			 struct mpp_task *mpp_task)
534 {
535 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
536 	struct vdpu_task *task = to_vdpu_task(mpp_task);
537 
538 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
539 
540 	return 0;
541 }
542 
vdpu_reduce_freq(struct mpp_dev * mpp)543 static int vdpu_reduce_freq(struct mpp_dev *mpp)
544 {
545 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
546 
547 	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
548 
549 	return 0;
550 }
551 
/*
 * Hard irq handler: latch and acknowledge the interrupt status.
 * Returns IRQ_NONE if the raw interrupt bit is not set (shared line),
 * otherwise defers the real work to the threaded handler.
 */
static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU2_REG_DEC_INT);
	if (!(mpp->irq_status & VDPU2_DEC_INT_RAW))
		return IRQ_NONE;

	/* ack the interrupt */
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU2_REG_DEC_EN, VDPU2_DEC_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}
564 
/*
 * Threaded irq handler: hand the latched status to the current task,
 * request a hardware reset if any error bit is set, and complete the
 * task.
 */
static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	/* any of these conditions leaves the hw needing a reset */
	err_mask = VDPU2_INT_TIMEOUT
		| VDPU2_INT_STRM_ERROR
		| VDPU2_INT_ASO_ERROR
		| VDPU2_INT_BUF_EMPTY
		| VDPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}
598 
/*
 * Stop the decoder and, when both reset lines are available, pulse a
 * full a/h reset with the PMU held idle around it.
 */
static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	/* quiesce the hardware before asserting resets */
	mpp_write(mpp, VDPU2_REG_DEC_EN, 0);
	mpp_write(mpp, VDPU2_REG_DEC_INT, 0);
	if (dec->rst_a && dec->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);
	}

	return 0;
}
618 
/* Hardware ops for the generic vdpu2 */
static struct mpp_hw_ops vdpu_v2_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

/* PX30 variant: extra combo-init and GRF switching workarounds */
static struct mpp_hw_ops vdpu_px30_hw_ops = {
	.init = vdpu_px30_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

/* Task lifecycle ops shared by all vdpu2 variants */
static struct mpp_dev_ops vdpu_v2_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};
647 
/* Device description bound via the OF match table below */
static const struct mpp_dev_var vdpu_v2_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_v2_hw_ops,
	.dev_ops = &vdpu_v2_dev_ops,
};

/* Same hardware, PX30-specific hw_ops */
static const struct mpp_dev_var vdpu_px30_data = {
	.device_type = MPP_DEVICE_VDPU2,
	.hw_info = &vdpu_v2_hw_info,
	.trans_info = vdpu_v2_trans,
	.hw_ops = &vdpu_px30_hw_ops,
	.dev_ops = &vdpu_v2_dev_ops,
};

static const struct of_device_id mpp_vdpu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v2",
		.data = &vdpu_v2_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,vpu-decoder-px30",
		.data = &vdpu_px30_data,
	},
#endif
	{},
};
677 
vdpu_probe(struct platform_device * pdev)678 static int vdpu_probe(struct platform_device *pdev)
679 {
680 	int ret = 0;
681 	struct device *dev = &pdev->dev;
682 	struct vdpu_dev *dec = NULL;
683 	struct mpp_dev *mpp = NULL;
684 	const struct of_device_id *match = NULL;
685 
686 	dev_info(dev, "probe device\n");
687 	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
688 	if (!dec)
689 		return -ENOMEM;
690 	platform_set_drvdata(pdev, dec);
691 
692 	mpp = &dec->mpp;
693 
694 	if (pdev->dev.of_node) {
695 		match = of_match_node(mpp_vdpu2_dt_match,
696 				      pdev->dev.of_node);
697 		if (match)
698 			mpp->var = (struct mpp_dev_var *)match->data;
699 	}
700 
701 	ret = mpp_dev_probe(mpp, pdev);
702 	if (ret) {
703 		dev_err(dev, "probe sub driver failed\n");
704 		return -EINVAL;
705 	}
706 
707 	ret = devm_request_threaded_irq(dev, mpp->irq,
708 					mpp_dev_irq,
709 					mpp_dev_isr_sched,
710 					IRQF_SHARED,
711 					dev_name(dev), mpp);
712 	if (ret) {
713 		dev_err(dev, "register interrupter runtime failed\n");
714 		return -EINVAL;
715 	}
716 
717 	if (mpp->var->device_type == MPP_DEVICE_VDPU2) {
718 		mpp->srv->sub_devices[MPP_DEVICE_VDPU2_PP] = mpp;
719 		set_bit(MPP_DEVICE_VDPU2_PP, &mpp->srv->hw_support);
720 	}
721 
722 	mpp->session_max_buffers = VDPU2_SESSION_MAX_BUFFERS;
723 	vdpu_procfs_init(mpp);
724 	/* register current device to mpp service */
725 	mpp_dev_register_srv(mpp, mpp->srv);
726 	dev_info(dev, "probing finish\n");
727 
728 	return 0;
729 }
730 
/* Platform remove: unregister from mpp core and drop procfs entries. */
static int vdpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = platform_get_drvdata(pdev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(&dec->mpp);
	vdpu_procfs_remove(&dec->mpp);

	return 0;
}
742 
/*
 * Platform shutdown: block new work via the service's shutdown flag
 * and poll (up to 200 ms) for in-flight tasks to drain.
 */
static void vdpu_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &dec->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "wait total running time out\n");
}
760 
/* Driver object, registered by the mpp service core (hence EXPORT) */
struct platform_driver rockchip_vdpu2_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = vdpu_shutdown,
	.driver = {
		.name = VDPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu2_driver);
771