• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Randy Li, randy.li@rock-chips.com
8  *	Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <asm/cacheflush.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/iopoll.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/of_platform.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/regmap.h>
22 #include <linux/proc_fs.h>
23 #include <soc/rockchip/pm_domains.h>
24 
25 #include "mpp_debug.h"
26 #include "mpp_common.h"
27 #include "mpp_iommu.h"
28 
#define VDPU1_DRIVER_NAME		"mpp_vdpu1"

#define	VDPU1_SESSION_MAX_BUFFERS	40
/* The maximum registers number of all the version */
#define VDPU1_REG_NUM			60
#define VDPU1_REG_HW_ID_INDEX		0
#define VDPU1_REG_START_INDEX		0
#define VDPU1_REG_END_INDEX		59

/* register file layout when the post-processor path is in use */
#define VDPU1_REG_PP_NUM		101
#define VDPU1_REG_PP_START_INDEX	0
#define VDPU1_REG_PP_END_INDEX		100

/* interrupt/enable register: byte offset and u32 index into reg[] */
#define VDPU1_REG_DEC_INT_EN		0x004
#define VDPU1_REG_DEC_INT_EN_INDEX	(1)
/* B slice detected, used in 8190 decoder and later */
#define	VDPU1_INT_PIC_INF		BIT(24)
#define	VDPU1_INT_TIMEOUT		BIT(18)
#define	VDPU1_INT_SLICE			BIT(17)
#define	VDPU1_INT_STRM_ERROR		BIT(16)
#define	VDPU1_INT_ASO_ERROR		BIT(15)
#define	VDPU1_INT_BUF_EMPTY		BIT(14)
#define	VDPU1_INT_BUS_ERROR		BIT(13)
#define	VDPU1_DEC_INT			BIT(12)
#define	VDPU1_DEC_INT_RAW		BIT(8)
#define	VDPU1_DEC_IRQ_DIS		BIT(4)
#define	VDPU1_DEC_START			BIT(0)

/* NOTE: Don't enable it or decoding AVC would meet problem at rk3288 */
#define VDPU1_REG_DEC_EN		0x008
#define	VDPU1_CLOCK_GATE_EN		BIT(10)

/* system control register: format, product number and picture width */
#define VDPU1_REG_SYS_CTRL		0x00c
#define VDPU1_REG_SYS_CTRL_INDEX	(3)
#define VDPU1_RGE_WIDTH_INDEX		(4)
#define	VDPU1_GET_FORMAT(x)		(((x) >> 28) & 0xf)
#define VDPU1_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
#define VDPU1_GET_WIDTH(x)		(((x) & 0xff800000) >> 19)
/* format codes as returned by VDPU1_GET_FORMAT() */
#define	VDPU1_FMT_H264D			0
#define	VDPU1_FMT_MPEG4D		1
#define	VDPU1_FMT_H263D			2
#define	VDPU1_FMT_JPEGD			3
#define	VDPU1_FMT_VC1D			4
#define	VDPU1_FMT_MPEG2D		5
#define	VDPU1_FMT_MPEG1D		6
#define	VDPU1_FMT_VP6D			7
#define	VDPU1_FMT_RESERVED		8
#define	VDPU1_FMT_VP7D			9
#define	VDPU1_FMT_VP8D			10
#define	VDPU1_FMT_AVSD			11

#define VDPU1_REG_STREAM_RLC_BASE	0x030
#define VDPU1_REG_STREAM_RLC_BASE_INDEX	(12)

#define VDPU1_REG_DIR_MV_BASE		0x0a4
#define VDPU1_REG_DIR_MV_BASE_INDEX	(41)

#define VDPU1_REG_CLR_CACHE_BASE	0x810

/* container_of helpers between generic mpp objects and vdpu wrappers */
#define to_vdpu_task(task)		\
		container_of(task, struct vdpu_task, mpp_task)
#define to_vdpu_dev(dev)		\
		container_of(dev, struct vdpu_dev, mpp)
92 
/*
 * Hardware ID values (read from the ID register, reg index 0).
 * NOTE(review): the tag "VPUD1" looks like a typo for "VDPU1"; kept
 * as-is in case other translation units reference it.
 */
enum VPUD1_HW_ID {
	VDPU1_ID_0102 = 0x0102,
	VDPU1_ID_9190 = 0x6731,
};
97 
/* Per-task state built by vdpu_alloc_task() and consumed by run/finish. */
struct vdpu_task {
	struct mpp_task mpp_task;
	/* enable of post process */
	bool pp_enable;

	/* clock mode chosen for this task (see the *_get_freq hooks) */
	enum MPP_CLOCK_MODE clk_mode;
	/* register shadow; sized for the larger post-process layout */
	u32 reg[VDPU1_REG_PP_NUM];

	/* fd + offset translation info extracted from user messages */
	struct reg_offset_info off_inf;
	/* stream base captured at setup; used by vdpu_finish() to
	 * compute the decoded length
	 */
	u32 strm_addr;
	/* interrupt status latched by the isr for this task */
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};
115 
/* Per-device state; to_vdpu_dev() recovers it from the embedded mpp. */
struct vdpu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;	/* "aclk_vcodec" axi clock */
	struct mpp_clk_info hclk_info;	/* "hclk_vcodec" ahb clock */
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;	/* per-device procfs directory */
#endif
	struct reset_control *rst_a;	/* "video_a" reset line */
	struct reset_control *rst_h;	/* "video_h" reset line */
};
127 
/* register-window description for the plain decoder path */
static struct mpp_hw_info vdpu_v1_hw_info = {
	.reg_num = VDPU1_REG_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_START_INDEX,
	.reg_end = VDPU1_REG_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};

/* wider register window used when the post-processor is attached */
static struct mpp_hw_info vdpu_pp_v1_hw_info = {
	.reg_num = VDPU1_REG_PP_NUM,
	.reg_id = VDPU1_REG_HW_ID_INDEX,
	.reg_start = VDPU1_REG_PP_START_INDEX,
	.reg_end = VDPU1_REG_PP_END_INDEX,
	.reg_en = VDPU1_REG_DEC_INT_EN_INDEX,
};
143 
/*
 * file handle translate information
 *
 * Each table lists the register indices that carry a dma-buf file
 * handle for the given format; they are consumed via vdpu_v1_trans by
 * mpp_translate_reg_address() (see vdpu_process_reg_fd()), which
 * rewrites those registers into device addresses.
 */
static const u16 trans_tbl_avsd[] = {
	12, 13, 14, 15, 16, 17, 40, 41, 45
};

static const u16 trans_tbl_default[] = {
	12, 13, 14, 15, 16, 17, 40, 41
};

static const u16 trans_tbl_jpegd[] = {
	12, 13, 14, 40, 66, 67
};

static const u16 trans_tbl_h264d[] = {
	12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
	28, 29, 40
};

static const u16 trans_tbl_vc1d[] = {
	12, 13, 14, 15, 16, 17, 27, 41
};

static const u16 trans_tbl_vp6d[] = {
	12, 13, 14, 18, 27, 40
};

static const u16 trans_tbl_vp8d[] = {
	10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40
};

/* indexed by the VDPU1_FMT_* code extracted from the sys-ctrl register */
static struct mpp_trans_info vdpu_v1_trans[] = {
	[VDPU1_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[VDPU1_FMT_H263D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG4D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_JPEGD] = {
		.count = ARRAY_SIZE(trans_tbl_jpegd),
		.table = trans_tbl_jpegd,
	},
	[VDPU1_FMT_VC1D] = {
		.count = ARRAY_SIZE(trans_tbl_vc1d),
		.table = trans_tbl_vc1d,
	},
	[VDPU1_FMT_MPEG2D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_MPEG1D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP6D] = {
		.count = ARRAY_SIZE(trans_tbl_vp6d),
		.table = trans_tbl_vp6d,
	},
	[VDPU1_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VDPU1_FMT_VP7D] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VDPU1_FMT_VP8D] = {
		.count = ARRAY_SIZE(trans_tbl_vp8d),
		.table = trans_tbl_vp8d,
	},
	[VDPU1_FMT_AVSD] = {
		.count = ARRAY_SIZE(trans_tbl_avsd),
		.table = trans_tbl_avsd,
	},
};
226 
/*
 * vdpu_process_reg_fd - resolve dma-buf fds stored in the register set
 * into device (iommu) addresses before the task reaches hardware.
 *
 * @session: session that owns the buffers
 * @task: task whose reg[] is rewritten in place
 * @msgs: unused here; kept to match the caller's signature
 *
 * Returns 0 on success, an error from the generic translation, or
 * -EFAULT if attaching the H.264 direct-MV buffer fails.
 */
static int vdpu_process_reg_fd(struct mpp_session *session,
			       struct vdpu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int fmt = VDPU1_GET_FORMAT(task->reg[VDPU1_REG_SYS_CTRL_INDEX]);

	/* generic per-format fd translation driven by vdpu_v1_trans */
	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;
	/*
	 * special offset scale case
	 *
	 * This translation is for fd + offset translation.
	 * One register has 32bits. We need to transfer both buffer file
	 * handle and the start address offset so we packet file handle
	 * and offset together using below format.
	 *
	 *  0~9  bit for buffer file handle range 0 ~ 1023
	 * 10~31 bit for offset range 0 ~ 4M
	 *
	 * But on 4K case the offset can be larger the 4M
	 */
	if (likely(fmt == VDPU1_FMT_H264D)) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		u32 idx = VDPU1_REG_DIR_MV_BASE_INDEX;
		struct mpp_mem_region *mem_region = NULL;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			/* register carries a plain fd, no packed offset */
			fd = task->reg[idx];
			offset = 0;
		} else {
			fd = task->reg[idx] & 0x3ff;
			/*
			 * NOTE(review): bits 10..31 are additionally scaled
			 * by 16 here, which disagrees with the 0~4M range
			 * described above — confirm the intended encoding.
			 */
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region))
			goto fail;

		iova = mem_region->iova;
		mpp_debug(DEBUG_IOMMU, "DMV[%3d]: %3d => %pad + offset %10d\n",
			  idx, fd, &iova, offset);
		task->reg[idx] = iova + offset;
	}

	/* apply any user-supplied per-register extra offsets */
	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
fail:
	return -EFAULT;
}
281 
/*
 * vdpu_extract_task_msg - sort the user's requests into the task.
 *
 * Write requests are bounds-checked against the hardware register
 * window, their payload copied into task->reg immediately, and the
 * descriptors queued in w_reqs for vdpu_run().  Read requests are only
 * queued (r_reqs) and serviced later by vdpu_finish()/vdpu_result().
 *
 * Returns 0, or -EIO if a user copy fails.  Out-of-range requests are
 * silently skipped (mpp_check_req is expected to log).
 */
static int vdpu_extract_task_msg(struct vdpu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			/* skip requests outside the register window */
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			/* read-back is deferred until the task finishes */
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}
335 
/*
 * vdpu_alloc_task - mpp_dev_ops.alloc_task: build a vdpu task from the
 * user's message set.
 *
 * Selects the post-process register layout when the session opened the
 * VDPU1_PP device, extracts write/read requests, translates buffer fds
 * (unless MPP_FLAGS_REG_FD_NO_TRANS) and latches the stream base for
 * length accounting in vdpu_finish().
 *
 * Returns the embedded mpp_task, or NULL on failure.
 */
static void *vdpu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vdpu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	if (session->device_type == MPP_DEVICE_VDPU1_PP) {
		task->pp_enable = true;
		mpp_task->hw_info = &vdpu_pp_v1_hw_info;
	} else {
		mpp_task->hw_info = mpp->var->hw_info;
	}
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = vdpu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vdpu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* remember the stream base to derive decoded length later */
	task->strm_addr = task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX];
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	/* dump state for debugging, then undo the task init */
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
383 
/*
 * vdpu_run - mpp_dev_ops.run: program the task's registers into the
 * hardware and kick off decoding.
 */
static int vdpu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VDPU1_REG_CLR_CACHE_BASE, 1);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		/*
		 * reg_en is passed so the helper can treat the enable
		 * register specially; it is written last, below.
		 * NOTE(review): confirm against mpp_write_req().
		 */
		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
	/* init current task */
	mpp->cur_task = mpp_task;
	/* Flush the register before the start the device */
	wmb();
	/* writing the enable register with the start bit launches decode */
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN,
		  task->reg[reg_en] | VDPU1_DEC_START);

	mpp_debug_leave();

	return 0;
}
415 
/*
 * vdpu_finish - mpp_dev_ops.finish: read back the requested registers
 * after completion and patch the status/length fields userspace reads.
 */
static int vdpu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	u32 dec_get;
	s32 dec_length;
	struct mpp_request *req;
	struct vdpu_task *task = to_vdpu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VDPU1_REG_DEC_INT_EN_INDEX] = task->irq_status;
	/* revert hack for decoded length */
	dec_get = mpp_read_relaxed(mpp, VDPU1_REG_STREAM_RLC_BASE);
	/* consumed bytes = current stream pointer - original base */
	dec_length = dec_get - task->strm_addr;
	/*
	 * NOTE(review): the length is shifted into the upper bits,
	 * mirroring the fd(0..9)/offset(10..31) packing used on input —
	 * confirm userspace unpacks it with >> 10.
	 */
	task->reg[VDPU1_REG_STREAM_RLC_BASE_INDEX] = dec_length << 10;
	mpp_debug(DEBUG_REGISTER,
		  "dec_get %08x dec_length %d\n", dec_get, dec_length);

	mpp_debug_leave();

	return 0;
}
448 
vdpu_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)449 static int vdpu_result(struct mpp_dev *mpp,
450 		       struct mpp_task *mpp_task,
451 		       struct mpp_task_msgs *msgs)
452 {
453 	u32 i;
454 	struct mpp_request *req;
455 	struct vdpu_task *task = to_vdpu_task(mpp_task);
456 
457 	/* FIXME may overflow the kernel */
458 	for (i = 0; i < task->r_req_cnt; i++) {
459 		req = &task->r_reqs[i];
460 
461 		if (copy_to_user(req->data,
462 				 (u8 *)task->reg + req->offset,
463 				 req->size)) {
464 			mpp_err("copy_to_user reg fail\n");
465 			return -EIO;
466 		}
467 	}
468 
469 	return 0;
470 }
471 
/*
 * vdpu_free_task - mpp_dev_ops.free_task: release the task's attached
 * resources, then the task allocation itself.
 */
static int vdpu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_vdpu_task(mpp_task));

	return 0;
}
482 
483 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
vdpu_procfs_remove(struct mpp_dev * mpp)484 static int vdpu_procfs_remove(struct mpp_dev *mpp)
485 {
486 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
487 
488 	if (dec->procfs) {
489 		proc_remove(dec->procfs);
490 		dec->procfs = NULL;
491 	}
492 
493 	return 0;
494 }
495 
/*
 * vdpu_procfs_init - create the per-device procfs directory plus two
 * writable debug knobs: an aclk rate override and the per-session
 * buffer limit.
 */
static int vdpu_procfs_init(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed on open procfs\n");
		/* normalize to NULL so vdpu_procfs_remove() stays safe */
		dec->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
513 #else
/* procfs disabled: no-op stubs keep the call sites unconditional */
static inline int vdpu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vdpu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
523 #endif
524 
/*
 * vdpu_init - mpp_hw_ops.init: one-time setup of grf info, clocks,
 * default clock rate and reset controls.
 *
 * Missing clocks or reset lines only log an error; the function always
 * returns 0 so probing continues.
 */
static int vdpu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VDPU1];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");

	return 0;
}
552 
vdpu_clk_on(struct mpp_dev * mpp)553 static int vdpu_clk_on(struct mpp_dev *mpp)
554 {
555 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
556 
557 	mpp_clk_safe_enable(dec->aclk_info.clk);
558 	mpp_clk_safe_enable(dec->hclk_info.clk);
559 
560 	return 0;
561 }
562 
vdpu_clk_off(struct mpp_dev * mpp)563 static int vdpu_clk_off(struct mpp_dev *mpp)
564 {
565 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
566 
567 	mpp_clk_safe_disable(dec->aclk_info.clk);
568 	mpp_clk_safe_disable(dec->hclk_info.clk);
569 
570 	return 0;
571 }
572 
vdpu_3288_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)573 static int vdpu_3288_get_freq(struct mpp_dev *mpp,
574 			      struct mpp_task *mpp_task)
575 {
576 	u32 width;
577 	struct vdpu_task *task = to_vdpu_task(mpp_task);
578 
579 	width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
580 	if (width > 2560)
581 		task->clk_mode = CLK_MODE_ADVANCED;
582 
583 	return 0;
584 }
585 
vdpu_3368_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)586 static int vdpu_3368_get_freq(struct mpp_dev *mpp,
587 			      struct mpp_task *mpp_task)
588 {
589 	u32 width;
590 	struct vdpu_task *task = to_vdpu_task(mpp_task);
591 
592 	width = VDPU1_GET_WIDTH(task->reg[VDPU1_RGE_WIDTH_INDEX]);
593 	if (width > 2560)
594 		task->clk_mode = CLK_MODE_ADVANCED;
595 
596 	return 0;
597 }
598 
vdpu_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)599 static int vdpu_set_freq(struct mpp_dev *mpp,
600 			 struct mpp_task *mpp_task)
601 {
602 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
603 	struct vdpu_task *task = to_vdpu_task(mpp_task);
604 
605 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
606 
607 	return 0;
608 }
609 
vdpu_reduce_freq(struct mpp_dev * mpp)610 static int vdpu_reduce_freq(struct mpp_dev *mpp)
611 {
612 	struct vdpu_dev *dec = to_vdpu_dev(mpp);
613 
614 	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
615 
616 	return 0;
617 }
618 
/*
 * vdpu_irq - hard-irq half: latch the raw status, acknowledge the
 * interrupt and defer the rest to the threaded handler (vdpu_isr).
 */
static int vdpu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VDPU1_REG_DEC_INT_EN);
	/* the line is shared (IRQF_SHARED): bail out if it isn't ours */
	if (!(mpp->irq_status & VDPU1_DEC_INT_RAW))
		return IRQ_NONE;

	/* clear/ack the interrupt register */
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);
	/* set clock gating to save power */
	mpp_write(mpp, VDPU1_REG_DEC_EN, VDPU1_CLOCK_GATE_EN);

	return IRQ_WAKE_THREAD;
}
631 
/*
 * vdpu_isr - threaded half: attribute the latched status to the current
 * task, schedule a hardware reset on error bits and complete the task.
 */
static int vdpu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vdpu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vdpu_task(mpp_task);
	/* status was latched by vdpu_irq() in hard-irq context */
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VDPU1_INT_TIMEOUT
		| VDPU1_INT_STRM_ERROR
		| VDPU1_INT_ASO_ERROR
		| VDPU1_INT_BUF_EMPTY
		| VDPU1_INT_BUS_ERROR;

	/* any error bit requests a reset before the next task runs */
	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}
665 
/*
 * vdpu_reset - pulse the axi/ahb reset lines to recover the decoder,
 * then leave all interrupts cleared.  The PMU idle request bracketing
 * the sequence is mandatory (see inline comment).
 */
static int vdpu_reset(struct mpp_dev *mpp)
{
	struct vdpu_dev *dec = to_vdpu_dev(mpp);

	if (dec->rst_a && dec->rst_h) {
		mpp_debug(DEBUG_RESET, "reset in\n");

		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		udelay(5);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_pmu_idle_request(mpp, false);

		mpp_debug(DEBUG_RESET, "reset out\n");
	}
	mpp_write(mpp, VDPU1_REG_DEC_INT_EN, 0);

	return 0;
}
688 
/* baseline hardware ops shared by the generic v1 and avs+ variants */
static struct mpp_hw_ops vdpu_v1_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

/* rk3288: adds a get_freq hook to boost the clock for wide frames */
static struct mpp_hw_ops vdpu_3288_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3288_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

/* rk3368: same clock policy as rk3288 */
static struct mpp_hw_ops vdpu_3368_hw_ops = {
	.init = vdpu_init,
	.clk_on = vdpu_clk_on,
	.clk_off = vdpu_clk_off,
	.get_freq = vdpu_3368_get_freq,
	.set_freq = vdpu_set_freq,
	.reduce_freq = vdpu_reduce_freq,
	.reset = vdpu_reset,
};

/* task lifecycle callbacks used by the common mpp framework */
static struct mpp_dev_ops vdpu_v1_dev_ops = {
	.alloc_task = vdpu_alloc_task,
	.run = vdpu_run,
	.irq = vdpu_irq,
	.isr = vdpu_isr,
	.finish = vdpu_finish,
	.result = vdpu_result,
	.free_task = vdpu_free_task,
};

/* device-variant descriptors bound via the of_device_id table below */
static const struct mpp_dev_var vdpu_v1_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3288_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3288_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var vdpu_3368_data = {
	.device_type = MPP_DEVICE_VDPU1,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_3368_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct mpp_dev_var avsd_plus_data = {
	.device_type = MPP_DEVICE_AVSPLUS_DEC,
	.hw_info = &vdpu_v1_hw_info,
	.trans_info = vdpu_v1_trans,
	.hw_ops = &vdpu_v1_hw_ops,
	.dev_ops = &vdpu_v1_dev_ops,
};

static const struct of_device_id mpp_vdpu1_dt_match[] = {
	{
		.compatible = "rockchip,vpu-decoder-v1",
		.data = &vdpu_v1_data,
	},
#ifdef CONFIG_CPU_RK3288
	{
		.compatible = "rockchip,vpu-decoder-rk3288",
		.data = &vdpu_3288_data,
	},
#endif
#ifdef CONFIG_CPU_RK3368
	{
		.compatible = "rockchip,vpu-decoder-rk3368",
		.data = &vdpu_3368_data,
	},
#endif
#ifdef CONFIG_CPU_RK3328
	{
		.compatible = "rockchip,avs-plus-decoder",
		.data = &avsd_plus_data,
	},
#endif
	{},
};
785 
vdpu_probe(struct platform_device * pdev)786 static int vdpu_probe(struct platform_device *pdev)
787 {
788 	struct device *dev = &pdev->dev;
789 	struct vdpu_dev *dec = NULL;
790 	struct mpp_dev *mpp = NULL;
791 	const struct of_device_id *match = NULL;
792 	int ret = 0;
793 
794 	dev_info(dev, "probe device\n");
795 	dec = devm_kzalloc(dev, sizeof(struct vdpu_dev), GFP_KERNEL);
796 	if (!dec)
797 		return -ENOMEM;
798 	platform_set_drvdata(pdev, dec);
799 
800 	mpp = &dec->mpp;
801 	if (pdev->dev.of_node) {
802 		match = of_match_node(mpp_vdpu1_dt_match, pdev->dev.of_node);
803 		if (match)
804 			mpp->var = (struct mpp_dev_var *)match->data;
805 	}
806 
807 	ret = mpp_dev_probe(mpp, pdev);
808 	if (ret) {
809 		dev_err(dev, "probe sub driver failed\n");
810 		return -EINVAL;
811 	}
812 
813 	ret = devm_request_threaded_irq(dev, mpp->irq,
814 					mpp_dev_irq,
815 					mpp_dev_isr_sched,
816 					IRQF_SHARED,
817 					dev_name(dev), mpp);
818 	if (ret) {
819 		dev_err(dev, "register interrupter runtime failed\n");
820 		return -EINVAL;
821 	}
822 
823 	if (mpp->var->device_type == MPP_DEVICE_VDPU1) {
824 		mpp->srv->sub_devices[MPP_DEVICE_VDPU1_PP] = mpp;
825 		set_bit(MPP_DEVICE_VDPU1_PP, &mpp->srv->hw_support);
826 	}
827 
828 	mpp->session_max_buffers = VDPU1_SESSION_MAX_BUFFERS;
829 	vdpu_procfs_init(mpp);
830 	/* register current device to mpp service */
831 	mpp_dev_register_srv(mpp, mpp->srv);
832 	dev_info(dev, "probing finish\n");
833 
834 	return 0;
835 }
836 
vdpu_remove(struct platform_device * pdev)837 static int vdpu_remove(struct platform_device *pdev)
838 {
839 	struct device *dev = &pdev->dev;
840 	struct vdpu_dev *dec = platform_get_drvdata(pdev);
841 
842 	dev_info(dev, "remove device\n");
843 	mpp_dev_remove(&dec->mpp);
844 	vdpu_procfs_remove(&dec->mpp);
845 
846 	return 0;
847 }
848 
/*
 * vdpu_shutdown - block system shutdown until in-flight tasks drain.
 * Raises the service-wide shutdown flag, then polls task_count every
 * 20ms for up to 200ms before giving up with an error message.
 */
static void vdpu_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct vdpu_dev *dec = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &dec->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "wait total running time out\n");
}
866 
/* exported (non-static) so the common mpp service can register it */
struct platform_driver rockchip_vdpu1_driver = {
	.probe = vdpu_probe,
	.remove = vdpu_remove,
	.shutdown = vdpu_shutdown,
	.driver = {
		.name = VDPU1_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vdpu1_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vdpu1_driver);
877