1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2021 Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Ding Wei, leo.ding@rock-chips.com
7  *
8  */
9 
10 #include <asm/cacheflush.h>
11 #include <linux/delay.h>
12 #include <linux/devfreq.h>
13 #include <linux/devfreq_cooling.h>
14 #include <linux/iopoll.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/of_platform.h>
20 #include <linux/of_address.h>
21 #include <linux/slab.h>
22 #include <linux/seq_file.h>
23 #include <linux/uaccess.h>
24 #include <linux/regmap.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/proc_fs.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/nospec.h>
29 #include <linux/workqueue.h>
30 #include <linux/dma-iommu.h>
31 #include <soc/rockchip/pm_domains.h>
32 #include <soc/rockchip/rockchip_ipa.h>
33 #include <soc/rockchip/rockchip_opp_select.h>
34 #include <soc/rockchip/rockchip_system_monitor.h>
35 
36 #include "mpp_debug.h"
37 #include "mpp_iommu.h"
38 #include "mpp_common.h"
39 
40 #define RKVENC_DRIVER_NAME			"mpp_rkvenc2"
41 
42 #define	RKVENC_SESSION_MAX_BUFFERS		40
43 #define RKVENC_MAX_CORE_NUM			4
44 
45 #define to_rkvenc_info(info)		\
46 		container_of(info, struct rkvenc_hw_info, hw)
47 #define to_rkvenc_task(ctx)		\
48 		container_of(ctx, struct rkvenc_task, mpp_task)
49 #define to_rkvenc_dev(dev)		\
50 		container_of(dev, struct rkvenc_dev, mpp)
51 
52 
53 enum RKVENC_FORMAT_TYPE {
54 	RKVENC_FMT_BASE		= 0x0000,
55 	RKVENC_FMT_H264E	= RKVENC_FMT_BASE + 0,
56 	RKVENC_FMT_H265E	= RKVENC_FMT_BASE + 1,
57 
58 	RKVENC_FMT_OSD_BASE	= 0x1000,
59 	RKVENC_FMT_H264E_OSD	= RKVENC_FMT_OSD_BASE + 0,
60 	RKVENC_FMT_H265E_OSD	= RKVENC_FMT_OSD_BASE + 1,
61 	RKVENC_FMT_BUTT,
62 };
63 
64 enum RKVENC_CLASS_TYPE {
65 	RKVENC_CLASS_BASE	= 0,	/* base */
66 	RKVENC_CLASS_PIC	= 1,	/* picture configure */
67 	RKVENC_CLASS_RC		= 2,	/* rate control */
68 	RKVENC_CLASS_PAR	= 3,	/* parameter */
69 	RKVENC_CLASS_SQI	= 4,	/* subjective adjust */
70 	RKVENC_CLASS_SCL	= 5,	/* scaling list */
71 	RKVENC_CLASS_OSD	= 6,	/* osd */
72 	RKVENC_CLASS_ST		= 7,	/* status */
73 	RKVENC_CLASS_DEBUG	= 8,	/* debug */
74 	RKVENC_CLASS_BUTT,
75 };
76 
77 enum RKVENC_CLASS_FD_TYPE {
78 	RKVENC_CLASS_FD_BASE	= 0,	/* base */
79 	RKVENC_CLASS_FD_OSD	= 1,	/* osd */
80 	RKVENC_CLASS_FD_BUTT,
81 };
82 
83 struct rkvenc_reg_msg {
84 	u32 base_s;
85 	u32 base_e;
86 };
87 
88 struct rkvenc_hw_info {
89 	struct mpp_hw_info hw;
90 	/* for register range check */
91 	u32 reg_class;
92 	struct rkvenc_reg_msg reg_msg[RKVENC_CLASS_BUTT];
93 	/* for fd translate */
94 	u32 fd_class;
95 	struct {
96 		u32 class;
97 		u32 base_fmt;
98 	} fd_reg[RKVENC_CLASS_FD_BUTT];
99 	/* for get format */
100 	struct {
101 		u32 class;
102 		u32 base;
103 		u32 bitpos;
104 		u32 bitlen;
105 	} fmt_reg;
106 	/* register info */
107 	u32 enc_start_base;
108 	u32 enc_clr_base;
109 	u32 int_en_base;
110 	u32 int_mask_base;
111 	u32 int_clr_base;
112 	u32 int_sta_base;
113 	u32 enc_wdg_base;
114 	u32 err_mask;
115 };
116 
117 #define DCHS_REG_OFFSET		(0x304)
118 #define DCHS_CLASS_OFFSET	(33)
119 #define DCHS_TXE		(0x10)
120 #define DCHS_RXE		(0x20)
121 
122 /* dual core hand-shake info */
123 union rkvenc2_dual_core_handshake_id {
124 	u64 val;
125 	struct {
126 		u32 txid	: 2;
127 		u32 rxid	: 2;
128 		u32 txe		: 1;
129 		u32 rxe		: 1;
130 		u32 working	: 1;
131 		u32 reserve0	: 9;
132 		u32 offset	: 11;
133 		u32 reserve1	: 5;
134 		u32 session_id;
135 	};
136 };
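/*
 * Illustrative example (not part of the driver): how the bitfield view
 * lines up with the raw DCHS_* masks above. txid/rxid occupy bits
 * [1:0]/[3:2], so txe lands on bit 4 (DCHS_TXE = 0x10) and rxe on
 * bit 5 (DCHS_RXE = 0x20), while the session id fills the high word.
 */
#if 0
static u64 rkvenc2_dchs_example(u32 session_id)
{
	union rkvenc2_dual_core_handshake_id id = { .val = 0 };

	id.txid = 1;		/* transmit on channel 1 */
	id.rxe = 1;		/* wait for the peer core */
	id.session_id = session_id;

	/* equivalent raw form: ((u64)session_id << 32) | DCHS_RXE | 0x1 */
	return id.val;
}
#endif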
137 
138 #define RKVENC2_REG_INT_EN		(8)
139 #define RKVENC2_BIT_SLICE_DONE_EN	BIT(3)
140 
141 #define RKVENC2_REG_INT_MASK		(9)
142 #define RKVENC2_BIT_SLICE_DONE_MASK	BIT(3)
143 
144 #define RKVENC2_REG_ENC_PIC		(32)
145 #define RKVENC2_BIT_SLEN_FIFO		BIT(30)
146 
147 #define RKVENC2_REG_SLI_SPLIT		(56)
148 #define RKVENC2_BIT_SLI_SPLIT		BIT(0)
149 #define RKVENC2_BIT_SLI_FLUSH		BIT(15)
150 
151 #define RKVENC2_REG_SLICE_NUM_BASE	(0x4034)
152 #define RKVENC2_REG_SLICE_LEN_BASE	(0x4038)
153 
154 struct rkvenc_poll_slice_cfg {
155 	s32 poll_type;
156 	s32 poll_ret;
157 	s32 count_max;
158 	s32 count_ret;
159 	s32 slice_len[];
160 };
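/*
 * Note: slice_len[] is a flexible array that directly follows the
 * header, so the caller sizes the buffer as sizeof(cfg) plus
 * count_max * sizeof(s32). rkvenc2_wait_result() stores one length at
 * slice_len[count_ret] per finished slice and then updates count_ret.
 * A minimal user-side allocation sketch (hypothetical, userspace):
 *
 *	struct rkvenc_poll_slice_cfg *cfg =
 *		malloc(sizeof(*cfg) + 8 * sizeof(s32));
 *	cfg->count_max = 8;
 *	cfg->count_ret = 0;
 */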
161 
162 struct rkvenc_task {
163 	struct mpp_task mpp_task;
164 	int fmt;
165 	struct rkvenc_hw_info *hw_info;
166 
167 	/* class register */
168 	struct {
169 		u32 valid;
170 		u32 *data;
171 		u32 size;
172 	} reg[RKVENC_CLASS_BUTT];
173 	/* register offset info */
174 	struct reg_offset_info off_inf;
175 
176 	enum MPP_CLOCK_MODE clk_mode;
177 	u32 irq_status;
178 	/* req for current task */
179 	u32 w_req_cnt;
180 	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
181 	u32 r_req_cnt;
182 	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
183 	struct mpp_dma_buffer *table;
184 
185 	union rkvenc2_dual_core_handshake_id dchs_id;
186 
187 	/* split output / slice mode info */
188 	u32 task_split;
189 	u32 task_split_done;
190 	DECLARE_KFIFO(slice_len, u32, 64);
191 };
192 
193 #define RKVENC_MAX_RCB_NUM		(4)
194 
195 struct rcb_info_elem {
196 	u32 index;
197 	u32 size;
198 };
199 
200 struct rkvenc2_rcb_info {
201 	u32 cnt;
202 	struct rcb_info_elem elem[RKVENC_MAX_RCB_NUM];
203 };
204 
205 struct rkvenc2_session_priv {
206 	struct rw_semaphore rw_sem;
207 	/* codec info from user */
208 	struct {
209 		/* show mode */
210 		u32 flag;
211 		/* item data */
212 		u64 val;
213 	} codec_info[ENC_INFO_BUTT];
214 	/* rcb_info for sram */
215 	struct rkvenc2_rcb_info rcb_inf;
216 };
217 
218 struct rkvenc_dev {
219 	struct mpp_dev mpp;
220 	struct rkvenc_hw_info *hw_info;
221 
222 	struct mpp_clk_info aclk_info;
223 	struct mpp_clk_info hclk_info;
224 	struct mpp_clk_info core_clk_info;
225 	u32 default_max_load;
226 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
227 	struct proc_dir_entry *procfs;
228 #endif
229 	struct reset_control *rst_a;
230 	struct reset_control *rst_h;
231 	struct reset_control *rst_core;
232 	/* for ccu */
233 	struct rkvenc_ccu *ccu;
234 	struct list_head core_link;
235 	u32 disable_work;
236 
237 	/* internal rcb-memory */
238 	u32 sram_size;
239 	u32 sram_used;
240 	dma_addr_t sram_iova;
241 	u32 sram_enabled;
242 	struct page *rcb_page;
243 };
244 
245 struct rkvenc_ccu {
246 	u32 core_num;
247 	/* lock for core attach */
248 	struct mutex lock;
249 	struct list_head core_list;
250 	struct mpp_dev *main_core;
251 
252 	spinlock_t lock_dchs;
253 	union rkvenc2_dual_core_handshake_id dchs[RKVENC_MAX_CORE_NUM];
254 };
255 
256 static struct rkvenc_hw_info rkvenc_v2_hw_info = {
257 	.hw = {
258 		.reg_num = 254,
259 		.reg_id = 0,
260 		.reg_en = 4,
261 		.reg_start = 160,
262 		.reg_end = 253,
263 	},
264 	.reg_class = RKVENC_CLASS_BUTT,
265 	.reg_msg[RKVENC_CLASS_BASE] = {
266 		.base_s = 0x0000,
267 		.base_e = 0x0058,
268 	},
269 	.reg_msg[RKVENC_CLASS_PIC] = {
270 		.base_s = 0x0280,
271 		.base_e = 0x03f4,
272 	},
273 	.reg_msg[RKVENC_CLASS_RC] = {
274 		.base_s = 0x1000,
275 		.base_e = 0x10e0,
276 	},
277 	.reg_msg[RKVENC_CLASS_PAR] = {
278 		.base_s = 0x1700,
279 		.base_e = 0x1cd4,
280 	},
281 	.reg_msg[RKVENC_CLASS_SQI] = {
282 		.base_s = 0x2000,
283 		.base_e = 0x21e4,
284 	},
285 	.reg_msg[RKVENC_CLASS_SCL] = {
286 		.base_s = 0x2200,
287 		.base_e = 0x2c98,
288 	},
289 	.reg_msg[RKVENC_CLASS_OSD] = {
290 		.base_s = 0x3000,
291 		.base_e = 0x347c,
292 	},
293 	.reg_msg[RKVENC_CLASS_ST] = {
294 		.base_s = 0x4000,
295 		.base_e = 0x42cc,
296 	},
297 	.reg_msg[RKVENC_CLASS_DEBUG] = {
298 		.base_s = 0x5000,
299 		.base_e = 0x5354,
300 	},
301 	.fd_class = RKVENC_CLASS_FD_BUTT,
302 	.fd_reg[RKVENC_CLASS_FD_BASE] = {
303 		.class = RKVENC_CLASS_PIC,
304 		.base_fmt = RKVENC_FMT_BASE,
305 	},
306 	.fd_reg[RKVENC_CLASS_FD_OSD] = {
307 		.class = RKVENC_CLASS_OSD,
308 		.base_fmt = RKVENC_FMT_OSD_BASE,
309 	},
310 	.fmt_reg = {
311 		.class = RKVENC_CLASS_PIC,
312 		.base = 0x0300,
313 		.bitpos = 0,
314 		.bitlen = 1,
315 	},
316 	.enc_start_base = 0x0010,
317 	.enc_clr_base = 0x0014,
318 	.int_en_base = 0x0020,
319 	.int_mask_base = 0x0024,
320 	.int_clr_base = 0x0028,
321 	.int_sta_base = 0x002c,
322 	.enc_wdg_base = 0x0038,
323 	.err_mask = 0x03f0,
324 };
325 
326 /*
327  * file handle translate information for v2
328  */
329 static const u16 trans_tbl_h264e_v2[] = {
330 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
331 	10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
332 	20, 21, 22, 23,
333 };
334 
335 static const u16 trans_tbl_h264e_v2_osd[] = {
336 	20, 21, 22, 23, 24, 25, 26, 27,
337 };
338 
339 static const u16 trans_tbl_h265e_v2[] = {
340 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
341 	10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
342 	20, 21, 22, 23,
343 };
344 
345 static const u16 trans_tbl_h265e_v2_osd[] = {
346 	20, 21, 22, 23, 24, 25, 26, 27,
347 };
348 
349 static struct mpp_trans_info trans_rkvenc_v2[] = {
350 	[RKVENC_FMT_H264E] = {
351 		.count = ARRAY_SIZE(trans_tbl_h264e_v2),
352 		.table = trans_tbl_h264e_v2,
353 	},
354 	[RKVENC_FMT_H264E_OSD] = {
355 		.count = ARRAY_SIZE(trans_tbl_h264e_v2_osd),
356 		.table = trans_tbl_h264e_v2_osd,
357 	},
358 	[RKVENC_FMT_H265E] = {
359 		.count = ARRAY_SIZE(trans_tbl_h265e_v2),
360 		.table = trans_tbl_h265e_v2,
361 	},
362 	[RKVENC_FMT_H265E_OSD] = {
363 		.count = ARRAY_SIZE(trans_tbl_h265e_v2_osd),
364 		.table = trans_tbl_h265e_v2_osd,
365 	},
366 };
367 
368 static bool req_over_class(struct mpp_request *req,
369 			   struct rkvenc_task *task, int class)
370 {
371 	bool ret;
372 	u32 base_s, base_e, req_e;
373 	struct rkvenc_hw_info *hw = task->hw_info;
374 
375 	base_s = hw->reg_msg[class].base_s;
376 	base_e = hw->reg_msg[class].base_e;
377 	req_e = req->offset + req->size - sizeof(u32);
378 
379 	ret = req->offset <= base_e && req_e >= base_s;
380 
381 	return ret;
382 }
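/*
 * Worked example: the test above is a closed-interval overlap check.
 * With the v2 PIC window [0x0280, 0x03f4], a request at offset 0x0270
 * of size 0x20 spans 0x0270..0x028c, so req_e (0x028c) >= base_s
 * (0x0280) and offset (0x0270) <= base_e (0x03f4), which counts as an
 * overlap; rkvenc_update_req() below then clips it to the window.
 */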
383 
384 static int rkvenc_free_class_msg(struct rkvenc_task *task)
385 {
386 	u32 i;
387 	u32 reg_class = task->hw_info->reg_class;
388 
389 	for (i = 0; i < reg_class; i++) {
390 		kfree(task->reg[i].data);
391 		task->reg[i].size = 0;
392 	}
393 
394 	return 0;
395 }
396 
397 static int rkvenc_alloc_class_msg(struct rkvenc_task *task, int class)
398 {
399 	u32 *data;
400 	struct rkvenc_hw_info *hw = task->hw_info;
401 
402 	if (!task->reg[class].data) {
403 		u32 base_s = hw->reg_msg[class].base_s;
404 		u32 base_e = hw->reg_msg[class].base_e;
405 		u32 class_size = base_e - base_s + sizeof(u32);
406 
407 		data = kzalloc(class_size, GFP_KERNEL);
408 		if (!data)
409 			return -ENOMEM;
410 		task->reg[class].data = data;
411 		task->reg[class].size = class_size;
412 	}
413 
414 	return 0;
415 }
416 
417 static int rkvenc_update_req(struct rkvenc_task *task, int class,
418 			     struct mpp_request *req_in,
419 			     struct mpp_request *req_out)
420 {
421 	u32 base_s, base_e, req_e, s, e;
422 	struct rkvenc_hw_info *hw = task->hw_info;
423 
424 	base_s = hw->reg_msg[class].base_s;
425 	base_e = hw->reg_msg[class].base_e;
426 	req_e = req_in->offset + req_in->size - sizeof(u32);
427 	s = max(req_in->offset, base_s);
428 	e = min(req_e, base_e);
429 
430 	req_out->offset = s;
431 	req_out->size = e - s + sizeof(u32);
432 	req_out->data = (u8 *)req_in->data + (s - req_in->offset);
433 
434 	return 0;
435 }
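/*
 * Worked example: continuing the case above, clipping the request
 * { .offset = 0x0270, .size = 0x20 } against the PIC window
 * [0x0280, 0x03f4] gives s = 0x0280 and e = 0x028c, so
 * req_out = { .offset = 0x0280, .size = 0x10 } with req_out->data
 * advanced 0x10 bytes past req_in->data.
 */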
436 
437 static int rkvenc_get_class_msg(struct rkvenc_task *task,
438 				u32 addr, struct mpp_request *msg)
439 {
440 	int i;
441 	bool found = false;
442 	u32 base_s, base_e;
443 	struct rkvenc_hw_info *hw = task->hw_info;
444 
445 	if (!msg)
446 		return -EINVAL;
447 
448 	memset(msg, 0, sizeof(*msg));
449 	for (i = 0; i < hw->reg_class; i++) {
450 		base_s = hw->reg_msg[i].base_s;
451 		base_e = hw->reg_msg[i].base_e;
452 		if (addr >= base_s && addr < base_e) {
453 			found = true;
454 			msg->offset = base_s;
455 			msg->size = task->reg[i].size;
456 			msg->data = task->reg[i].data;
457 			break;
458 		}
459 	}
460 
461 	return (found ? 0 : (-EINVAL));
462 }
463 
464 static u32 *rkvenc_get_class_reg(struct rkvenc_task *task, u32 addr)
465 {
466 	int i;
467 	u8 *reg = NULL;
468 	u32 base_s, base_e;
469 	struct rkvenc_hw_info *hw = task->hw_info;
470 
471 	for (i = 0; i < hw->reg_class; i++) {
472 		base_s = hw->reg_msg[i].base_s;
473 		base_e = hw->reg_msg[i].base_e;
474 		if (addr >= base_s && addr < base_e) {
475 			reg = (u8 *)task->reg[i].data + (addr - base_s);
476 			break;
477 		}
478 	}
479 
480 	return (u32 *)reg;
481 }
482 
483 static int rkvenc2_extract_rcb_info(struct rkvenc2_rcb_info *rcb_inf,
484 				    struct mpp_request *req)
485 {
486 	int max_size = ARRAY_SIZE(rcb_inf->elem);
487 	int cnt = req->size / sizeof(rcb_inf->elem[0]);
488 
489 	if (req->size > sizeof(rcb_inf->elem)) {
490 		mpp_err("rcb count %d over max %d\n", cnt, max_size);
491 		return -EINVAL;
492 	}
493 	if (copy_from_user(rcb_inf->elem, req->data, req->size)) {
494 		mpp_err("copy_from_user failed\n");
495 		return -EINVAL;
496 	}
497 	rcb_inf->cnt = cnt;
498 
499 	return 0;
500 }
501 
502 static int rkvenc_extract_task_msg(struct mpp_session *session,
503 				   struct rkvenc_task *task,
504 				   struct mpp_task_msgs *msgs)
505 {
506 	int ret;
507 	u32 i, j;
508 	struct mpp_request *req;
509 	struct rkvenc_hw_info *hw = task->hw_info;
510 
511 	mpp_debug_enter();
512 
513 	for (i = 0; i < msgs->req_cnt; i++) {
514 		req = &msgs->reqs[i];
515 		if (!req->size)
516 			continue;
517 
518 		switch (req->cmd) {
519 		case MPP_CMD_SET_REG_WRITE: {
520 			void *data;
521 			struct mpp_request *wreq;
522 
523 			for (j = 0; j < hw->reg_class; j++) {
524 				if (!req_over_class(req, task, j))
525 					continue;
526 
527 				ret = rkvenc_alloc_class_msg(task, j);
528 				if (ret) {
529 					mpp_err("alloc class msg %d fail.\n", j);
530 					goto fail;
531 				}
532 				wreq = &task->w_reqs[task->w_req_cnt];
533 				rkvenc_update_req(task, j, req, wreq);
534 				data = rkvenc_get_class_reg(task, wreq->offset);
535 				if (!data)
536 					goto fail;
537 				if (copy_from_user(data, wreq->data, wreq->size)) {
538 					mpp_err("copy_from_user fail, offset %08x\n", wreq->offset);
539 					ret = -EIO;
540 					goto fail;
541 				}
542 				task->reg[j].valid = 1;
543 				task->w_req_cnt++;
544 			}
545 		} break;
546 		case MPP_CMD_SET_REG_READ: {
547 			struct mpp_request *rreq;
548 
549 			for (j = 0; j < hw->reg_class; j++) {
550 				if (!req_over_class(req, task, j))
551 					continue;
552 
553 				ret = rkvenc_alloc_class_msg(task, j);
554 				if (ret) {
555 					mpp_err("alloc class msg reg %d fail.\n", j);
556 					goto fail;
557 				}
558 				rreq = &task->r_reqs[task->r_req_cnt];
559 				rkvenc_update_req(task, j, req, rreq);
560 				task->reg[j].valid = 1;
561 				task->r_req_cnt++;
562 			}
563 		} break;
564 		case MPP_CMD_SET_REG_ADDR_OFFSET: {
565 			mpp_extract_reg_offset_info(&task->off_inf, req);
566 		} break;
567 		case MPP_CMD_SET_RCB_INFO: {
568 			struct rkvenc2_session_priv *priv = session->priv;
569 
570 			if (priv)
571 				rkvenc2_extract_rcb_info(&priv->rcb_inf, req);
572 		} break;
573 		default:
574 			break;
575 		}
576 	}
577 	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
578 		  task->w_req_cnt, task->r_req_cnt);
579 
580 	mpp_debug_leave();
581 	return 0;
582 
583 fail:
584 	rkvenc_free_class_msg(task);
585 
586 	mpp_debug_leave();
587 	return ret;
588 }
589 
590 static int rkvenc_task_get_format(struct mpp_dev *mpp,
591 				  struct rkvenc_task *task)
592 {
593 	u32 offset, val;
594 
595 	struct rkvenc_hw_info *hw = task->hw_info;
596 	u32 class = hw->fmt_reg.class;
597 	u32 *class_reg = task->reg[class].data;
598 	u32 class_size = task->reg[class].size;
599 	u32 class_base = hw->reg_msg[class].base_s;
600 	u32 bitpos = hw->fmt_reg.bitpos;
601 	u32 bitlen = hw->fmt_reg.bitlen;
602 
603 	if (!class_reg || !class_size)
604 		return -EINVAL;
605 
606 	offset = hw->fmt_reg.base - class_base;
607 	val = class_reg[offset/sizeof(u32)];
608 	task->fmt = (val >> bitpos) & ((1 << bitlen) - 1);
609 
610 	return 0;
611 }
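/*
 * Worked example: for rkvenc_v2_hw_info, fmt_reg.base is 0x0300 in the
 * PIC class whose window starts at 0x0280, so offset = 0x80 and the
 * format is read from class word 32 (hardware register 0x0300, the
 * same word RKVENC2_REG_ENC_PIC indexes). With bitpos 0 and bitlen 1
 * the result is a single bit: 0 selects RKVENC_FMT_H264E and 1
 * selects RKVENC_FMT_H265E.
 */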
612 
613 static int rkvenc2_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session,
614 			      struct rkvenc_task *task)
615 {
616 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
617 	struct rkvenc2_session_priv *priv = session->priv;
618 	u32 sram_enabled = 0;
619 
620 	mpp_debug_enter();
621 
622 	if (priv && enc->sram_iova) {
623 		int i;
624 		u32 *reg;
625 		u32 reg_idx, rcb_size, rcb_offset;
626 		struct rkvenc2_rcb_info *rcb_inf = &priv->rcb_inf;
627 
628 		rcb_offset = 0;
629 		for (i = 0; i < rcb_inf->cnt; i++) {
630 			reg_idx = rcb_inf->elem[i].index;
631 			rcb_size = rcb_inf->elem[i].size;
632 
633 			if (rcb_offset > enc->sram_size ||
634 			    (rcb_offset + rcb_size) > enc->sram_used)
635 				continue;
636 
637 			mpp_debug(DEBUG_SRAM_INFO, "rcb: reg %d offset %d, size %d\n",
638 				  reg_idx, rcb_offset, rcb_size);
639 
640 			reg = rkvenc_get_class_reg(task, reg_idx * sizeof(u32));
641 			if (reg)
642 				*reg = enc->sram_iova + rcb_offset;
643 
644 			rcb_offset += rcb_size;
645 			sram_enabled = 1;
646 		}
647 	}
648 	if (enc->sram_enabled != sram_enabled) {
649 		mpp_debug(DEBUG_SRAM_INFO, "sram %s\n", sram_enabled ? "enabled" : "disabled");
650 		enc->sram_enabled = sram_enabled;
651 	}
652 
653 	mpp_debug_leave();
654 
655 	return 0;
656 }
657 
658 static void rkvenc2_setup_task_id(u32 session_id, struct rkvenc_task *task)
659 {
660 	u32 val = task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET];
661 
662 	/* always enable tx */
663 	val |= DCHS_TXE;
664 
665 	task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = val;
666 	task->dchs_id.val = (((u64)session_id << 32) | val);
667 }
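/*
 * Note: PIC class word DCHS_CLASS_OFFSET (33) maps to hardware
 * register 0x0280 + 33 * 4 = 0x0304, i.e. DCHS_REG_OFFSET, so the
 * value patched here is exactly what rkvenc_run() later programs into
 * the dual-core handshake register. The task id pairs that low word
 * with the session id, which rkvenc2_patch_dchs() uses to match a
 * receiving task against the transmitting core of the same session.
 */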
668 
669 static int rkvenc2_is_split_task(struct rkvenc_task *task)
670 {
671 	u32 slc_done_en;
672 	u32 slc_done_msk;
673 	u32 slen_fifo_en;
674 	u32 sli_split_en;
675 	u32 sli_flsh_en;
676 
677 	if (task->reg[RKVENC_CLASS_BASE].valid) {
678 		u32 *reg = task->reg[RKVENC_CLASS_BASE].data;
679 
680 		slc_done_en  = (reg[RKVENC2_REG_INT_EN] & RKVENC2_BIT_SLICE_DONE_EN) ? 1 : 0;
681 		slc_done_msk = (reg[RKVENC2_REG_INT_MASK] & RKVENC2_BIT_SLICE_DONE_MASK) ? 1 : 0;
682 	} else {
683 		slc_done_en  = 0;
684 		slc_done_msk = 0;
685 	}
686 
687 	if (task->reg[RKVENC_CLASS_PIC].valid) {
688 		u32 *reg = task->reg[RKVENC_CLASS_PIC].data;
689 
690 		slen_fifo_en = (reg[RKVENC2_REG_ENC_PIC] & RKVENC2_BIT_SLEN_FIFO) ? 1 : 0;
691 		sli_split_en = (reg[RKVENC2_REG_SLI_SPLIT] & RKVENC2_BIT_SLI_SPLIT) ? 1 : 0;
692 		sli_flsh_en  = (reg[RKVENC2_REG_SLI_SPLIT] & RKVENC2_BIT_SLI_FLUSH) ? 1 : 0;
693 	} else {
694 		slen_fifo_en = 0;
695 		sli_split_en = 0;
696 		sli_flsh_en  = 0;
697 	}
698 
699 	if (sli_split_en && slen_fifo_en && sli_flsh_en) {
700 		if (!slc_done_en || slc_done_msk)
701 			mpp_dbg_slice("task %d slice output enabled but irq disabled!\n",
702 				      task->mpp_task.task_id);
703 
704 		return 1;
705 	}
706 
707 	return 0;
708 }
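/*
 * Illustrative sketch (not part of the driver): the register bits a
 * task needs for the split path, mirroring the checks above.
 */
#if 0
static void rkvenc2_example_enable_split(struct rkvenc_task *task)
{
	u32 *base = task->reg[RKVENC_CLASS_BASE].data;
	u32 *pic = task->reg[RKVENC_CLASS_PIC].data;

	/* slice done irq enabled and not masked */
	base[RKVENC2_REG_INT_EN] |= RKVENC2_BIT_SLICE_DONE_EN;
	base[RKVENC2_REG_INT_MASK] &= ~RKVENC2_BIT_SLICE_DONE_MASK;
	/* slice length fifo plus split and flush on the picture class */
	pic[RKVENC2_REG_ENC_PIC] |= RKVENC2_BIT_SLEN_FIFO;
	pic[RKVENC2_REG_SLI_SPLIT] |= RKVENC2_BIT_SLI_SPLIT |
				      RKVENC2_BIT_SLI_FLUSH;
}
#endif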
709 
710 static void *rkvenc_alloc_task(struct mpp_session *session,
711 			       struct mpp_task_msgs *msgs)
712 {
713 	int ret;
714 	struct rkvenc_task *task;
715 	struct mpp_task *mpp_task;
716 	struct mpp_dev *mpp = session->mpp;
717 
718 	mpp_debug_enter();
719 
720 	task = kzalloc(sizeof(*task), GFP_KERNEL);
721 	if (!task)
722 		return NULL;
723 
724 	mpp_task = &task->mpp_task;
725 	mpp_task_init(session, mpp_task);
726 	mpp_task->hw_info = mpp->var->hw_info;
727 	task->hw_info = to_rkvenc_info(mpp_task->hw_info);
728 	/* extract reqs for current task */
729 	ret = rkvenc_extract_task_msg(session, task, msgs);
730 	if (ret)
731 		goto free_task;
732 	mpp_task->reg = task->reg[0].data;
733 	/* get format */
734 	ret = rkvenc_task_get_format(mpp, task);
735 	if (ret)
736 		goto free_task;
737 	/* process fd in register */
738 	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
739 		u32 i, j;
740 		int cnt;
741 		u32 off;
742 		const u16 *tbl;
743 		struct rkvenc_hw_info *hw = task->hw_info;
744 
745 		for (i = 0; i < hw->fd_class; i++) {
746 			u32 class = hw->fd_reg[i].class;
747 			u32 fmt = hw->fd_reg[i].base_fmt + task->fmt;
748 			u32 *reg = task->reg[class].data;
749 			u32 ss = hw->reg_msg[class].base_s / sizeof(u32);
750 
751 			if (!reg)
752 				continue;
753 
754 			ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL);
755 			if (ret)
756 				goto fail;
757 
758 			cnt = mpp->var->trans_info[fmt].count;
759 			tbl = mpp->var->trans_info[fmt].table;
760 			for (j = 0; j < cnt; j++) {
761 				off = mpp_query_reg_offset_info(&task->off_inf, tbl[j] + ss);
762 				mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n", tbl[j] + ss, off);
763 				reg[tbl[j]] += off;
764 			}
765 		}
766 	}
767 	rkvenc2_set_rcbbuf(mpp, session, task);
768 	rkvenc2_setup_task_id(session->index, task);
769 	task->clk_mode = CLK_MODE_NORMAL;
770 	task->task_split = rkvenc2_is_split_task(task);
771 	if (task->task_split)
772 		INIT_KFIFO(task->slice_len);
773 
774 	mpp_debug_leave();
775 
776 	return mpp_task;
777 
778 fail:
779 	mpp_task_dump_mem_region(mpp, mpp_task);
780 	mpp_task_dump_reg(mpp, mpp_task);
781 	mpp_task_finalize(session, mpp_task);
782 	/* free class register buffer */
783 	rkvenc_free_class_msg(task);
784 free_task:
785 	kfree(task);
786 
787 	return NULL;
788 }
789 
790 static void *rkvenc2_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
791 {
792 	struct mpp_taskqueue *queue = mpp->queue;
793 	unsigned long flags;
794 	s32 core_id;
795 
796 	spin_lock_irqsave(&queue->running_lock, flags);
797 
798 	core_id = find_first_bit(&queue->core_idle, queue->core_count);
799 
800 	if (core_id >= queue->core_count) {
801 		mpp_task = NULL;
802 		mpp_dbg_core("core %d all busy %lx\n", core_id, queue->core_idle);
803 	} else {
804 		unsigned long core_idle = queue->core_idle;
805 
806 		clear_bit(core_id, &queue->core_idle);
807 		mpp_task->mpp = queue->cores[core_id];
808 		mpp_task->core_id = core_id;
809 
810 		mpp_dbg_core("core %d set idle %lx -> %lx\n", core_id,
811 			     core_idle, queue->core_idle);
812 	}
813 
814 	spin_unlock_irqrestore(&queue->running_lock, flags);
815 
816 	return mpp_task;
817 }
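/*
 * Note: queue->core_idle is the arbitration state between cores.
 * rkvenc2_prepare() claims a core by clearing its bit under
 * running_lock, and the bit is returned either by rkvenc_isr() when
 * the task completes or by rkvenc_reset() after an error, so a task
 * is only dispatched to a core that has finished (or abandoned) its
 * previous job.
 */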
818 
819 static void rkvenc2_patch_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task)
820 {
821 	struct rkvenc_ccu *ccu;
822 	union rkvenc2_dual_core_handshake_id *dchs;
823 	union rkvenc2_dual_core_handshake_id *task_id = &task->dchs_id;
824 	int core_num;
825 	int core_id = enc->mpp.core_id;
826 	unsigned long flags;
827 	int i;
828 
829 	if (!enc->ccu)
830 		return;
831 
832 	if (core_id >= RKVENC_MAX_CORE_NUM) {
833 		dev_err(enc->mpp.dev, "invalid core id %d max %d\n",
834 			core_id, RKVENC_MAX_CORE_NUM);
835 		return;
836 	}
837 
838 	ccu = enc->ccu;
839 	dchs = ccu->dchs;
840 	core_num = ccu->core_num;
841 
842 	spin_lock_irqsave(&ccu->lock_dchs, flags);
843 
844 	if (dchs[core_id].working) {
845 		pr_err("cannot config when core %d is still working\n", core_id);
846 		spin_unlock_irqrestore(&ccu->lock_dchs, flags);
847 		return;
848 	}
849 
850 	if (mpp_debug_unlikely(DEBUG_CORE))
851 		pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n",
852 			dchs[0].working ? "work" : "idle",
853 			dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe,
854 			dchs[1].working ? "work" : "idle",
855 			dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe,
856 			core_id, task_id->txid, task_id->txe, task_id->rxid, task_id->rxe);
857 
858 	dchs[core_id].val = task_id->val;
859 
860 	if (task_id->rxe) {
861 		u32 task_rxid = task_id->rxid;
862 		u32 session_id = task_id->session_id;
863 		int dependency_core = -1;
864 
865 		for (i = 0; i < core_num; i++) {
866 			if (i == core_id || !dchs[i].working)
867 				continue;
868 
869 			if (task_rxid == dchs[i].txid && session_id == dchs[i].session_id) {
870 				dependency_core = i;
871 				break;
872 			}
873 		}
874 
875 		if (dependency_core < 0) {
876 			u32 dchs_val = (u32)task_id->val & (~(DCHS_RXE));
877 
878 			task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = dchs_val;
879 			dchs[core_id].rxe = 0;
880 		}
881 	}
882 	dchs[core_id].working = 1;
883 
884 	spin_unlock_irqrestore(&ccu->lock_dchs, flags);
885 }
886 
887 static void rkvenc2_update_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task)
888 {
889 	struct rkvenc_ccu *ccu = enc->ccu;
890 	int core_id = enc->mpp.core_id;
891 	unsigned long flags;
892 
893 	if (!ccu)
894 		return;
895 
896 	if (core_id >= RKVENC_MAX_CORE_NUM) {
897 		dev_err(enc->mpp.dev, "invalid core id %d max %d\n",
898 			core_id, RKVENC_MAX_CORE_NUM);
899 		return;
900 	}
901 
902 	if (mpp_debug_unlikely(DEBUG_CORE))
903 		pr_info("core %d task done\n", core_id);
904 
905 	spin_lock_irqsave(&ccu->lock_dchs, flags);
906 	ccu->dchs[core_id].val = 0;
907 	if (mpp_debug_unlikely(DEBUG_CORE)) {
908 		union rkvenc2_dual_core_handshake_id *dchs = ccu->dchs;
909 		union rkvenc2_dual_core_handshake_id *task_id = &task->dchs_id;
910 
911 		pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n",
912 			dchs[0].working ? "work" : "idle",
913 			dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe,
914 			dchs[1].working ? "work" : "idle",
915 			dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe,
916 			core_id, task_id->txid, task_id->txe, task_id->rxid, task_id->rxe);
917 	}
918 	spin_unlock_irqrestore(&ccu->lock_dchs, flags);
919 }
920 
921 static int rkvenc_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
922 {
923 	u32 i, j;
924 	u32 start_val = 0;
925 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
926 	struct rkvenc_task *task = to_rkvenc_task(mpp_task);
927 
928 	mpp_debug_enter();
929 
930 	/* clear hardware counter */
931 	mpp_write_relaxed(mpp, 0x5300, 0x2);
932 
933 	rkvenc2_patch_dchs(enc, task);
934 
935 	for (i = 0; i < task->w_req_cnt; i++) {
936 		int ret;
937 		u32 s, e, off;
938 		u32 *regs;
939 
940 		struct mpp_request msg;
941 		struct mpp_request *req = &task->w_reqs[i];
942 
943 		ret = rkvenc_get_class_msg(task, req->offset, &msg);
944 		if (ret)
945 			return -EINVAL;
946 
947 		s = (req->offset - msg.offset) / sizeof(u32);
948 		e = s + req->size / sizeof(u32);
949 		regs = (u32 *)msg.data;
950 		for (j = s; j < e; j++) {
951 			off = msg.offset + j * sizeof(u32);
952 			if (off == enc->hw_info->enc_start_base) {
953 				start_val = regs[j];
954 				continue;
955 			}
956 			mpp_write_relaxed(mpp, off, regs[j]);
957 		}
958 	}
959 
960 	if (mpp_debug_unlikely(DEBUG_CORE))
961 		dev_info(mpp->dev, "core %d dchs %08x\n", mpp->core_id,
962 			 mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET));
963 
964 	/* flush tlb before starting hardware */
965 	mpp_iommu_flush_tlb(mpp->iommu_info);
966 
967 	/* init current task */
968 	mpp->cur_task = mpp_task;
969 
970 	/* Flush the registers before starting the device */
971 	wmb();
972 	mpp_write(mpp, enc->hw_info->enc_start_base, start_val);
973 
974 	mpp_debug_leave();
975 
976 	return 0;
977 }
978 
979 static void rkvenc2_read_slice_len(struct mpp_dev *mpp, struct rkvenc_task *task)
980 {
981 	u32 sli_num = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_NUM_BASE);
982 	u32 i;
983 
984 	for (i = 0; i < sli_num; i++) {
985 		u32 sli_len = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_LEN_BASE);
986 
987 		mpp_dbg_slice("task %d wr len %d %d:%d\n",
988 			      task->mpp_task.task_id, sli_len, sli_num, i);
989 		kfifo_in(&task->slice_len, &sli_len, 1);
990 	}
991 }
992 
993 static void rkvenc2_last_slice(struct rkvenc_task *task)
994 {
995 	u32 sli_len = 0;
996 
997 	mpp_dbg_slice("task %d last slice found\n", task->mpp_task.task_id);
998 	kfifo_in(&task->slice_len, &sli_len, 1);
999 }
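/*
 * Note: the zero-length entry is a sentinel rather than a real slice;
 * rkvenc2_wait_result() treats slice_len == 0 popped from the fifo as
 * "all slices delivered" and falls through to the normal done path.
 */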
1000 
1001 static int rkvenc_irq(struct mpp_dev *mpp)
1002 {
1003 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1004 	struct rkvenc_hw_info *hw = enc->hw_info;
1005 	struct mpp_task *mpp_task = NULL;
1006 	struct rkvenc_task *task = NULL;
1007 	int ret = IRQ_NONE;
1008 
1009 	mpp_debug_enter();
1010 
1011 	mpp->irq_status = mpp_read(mpp, hw->int_sta_base);
1012 	if (!mpp->irq_status)
1013 		return ret;
1014 
1015 	mpp_task = mpp->cur_task;
1016 
1017 	if (mpp_task) {
1018 		task = to_rkvenc_task(mpp_task);
1019 
1020 		if (task->task_split) {
1021 			mpp_time_part_diff(mpp_task);
1022 
1023 			rkvenc2_read_slice_len(mpp, task);
1024 			mpp_write(mpp, hw->int_clr_base, 0x8);
1025 			wake_up(&mpp_task->wait);
1026 		}
1027 	}
1028 
1029 	if (mpp->irq_status & 1) {
1030 		mpp_write(mpp, hw->int_mask_base, 0x100);
1031 		mpp_write(mpp, hw->int_clr_base, 0xffffffff);
1032 		udelay(5);
1033 		mpp_write(mpp, hw->int_sta_base, 0);
1034 
1035 		ret = IRQ_WAKE_THREAD;
1036 
1037 		if (task) {
1038 			if (task->task_split) {
1039 				rkvenc2_read_slice_len(mpp, task);
1040 				rkvenc2_last_slice(task);
1041 			}
1042 			wake_up(&mpp_task->wait);
1043 		}
1044 	}
1045 
1046 	mpp_debug_leave();
1047 
1048 	return ret;
1049 }
1050 
1051 static int rkvenc_isr(struct mpp_dev *mpp)
1052 {
1053 	struct rkvenc_task *task;
1054 	struct mpp_task *mpp_task;
1055 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1056 	struct mpp_taskqueue *queue = mpp->queue;
1057 	unsigned long core_idle;
1058 
1059 	mpp_debug_enter();
1060 
1061 	/* FIXME use a spin lock here */
1062 	if (!mpp->cur_task) {
1063 		dev_err(mpp->dev, "no current task\n");
1064 		return IRQ_HANDLED;
1065 	}
1066 
1067 	mpp_task = mpp->cur_task;
1068 	mpp_time_diff(mpp_task);
1069 	mpp->cur_task = NULL;
1070 
1071 	if (mpp_task->mpp && mpp_task->mpp != mpp)
1072 		dev_err(mpp->dev, "mismatch core dev %p:%p\n", mpp_task->mpp, mpp);
1073 
1074 	task = to_rkvenc_task(mpp_task);
1075 	task->irq_status = mpp->irq_status;
1076 
1077 	rkvenc2_update_dchs(enc, task);
1078 
1079 	mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n",
1080 		  dev_name(mpp->dev), task->irq_status);
1081 
1082 	if (task->irq_status & enc->hw_info->err_mask) {
1083 		atomic_inc(&mpp->reset_request);
1084 		/* dump register */
1085 
1086 		mpp_task_dump_hw_reg(mpp);
1087 	}
1088 	mpp_task_finish(mpp_task->session, mpp_task);
1089 
1090 	core_idle = queue->core_idle;
1091 	set_bit(mpp->core_id, &queue->core_idle);
1092 
1093 	mpp_dbg_core("core %d isr idle %lx -> %lx\n", mpp->core_id, core_idle,
1094 		     queue->core_idle);
1095 
1096 	mpp_debug_leave();
1097 
1098 	return IRQ_HANDLED;
1099 }
1100 
1101 static int rkvenc_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1102 {
1103 	u32 i, j;
1104 	u32 *reg;
1105 	struct rkvenc_task *task = to_rkvenc_task(mpp_task);
1106 
1107 	mpp_debug_enter();
1108 
1109 	for (i = 0; i < task->r_req_cnt; i++) {
1110 		int ret;
1111 		int s, e;
1112 		struct mpp_request msg;
1113 		struct mpp_request *req = &task->r_reqs[i];
1114 
1115 		ret = rkvenc_get_class_msg(task, req->offset, &msg);
1116 		if (ret)
1117 			return -EINVAL;
1118 		s = (req->offset - msg.offset) / sizeof(u32);
1119 		e = s + req->size / sizeof(u32);
1120 		reg = (u32 *)msg.data;
1121 		for (j = s; j < e; j++)
1122 			reg[j] = mpp_read_relaxed(mpp, msg.offset + j * sizeof(u32));
1123 
1124 	}
1125 
1126 	/* revert hack for irq status */
1127 	reg = rkvenc_get_class_reg(task, task->hw_info->int_sta_base);
1128 	if (reg)
1129 		*reg = task->irq_status;
1130 
1131 	mpp_debug_leave();
1132 
1133 	return 0;
1134 }
1135 
1136 static int rkvenc_result(struct mpp_dev *mpp,
1137 			 struct mpp_task *mpp_task,
1138 			 struct mpp_task_msgs *msgs)
1139 {
1140 	u32 i;
1141 	struct rkvenc_task *task = to_rkvenc_task(mpp_task);
1142 
1143 	mpp_debug_enter();
1144 
1145 	for (i = 0; i < task->r_req_cnt; i++) {
1146 		struct mpp_request *req = &task->r_reqs[i];
1147 		u32 *reg = rkvenc_get_class_reg(task, req->offset);
1148 
1149 		if (!reg)
1150 			return -EINVAL;
1151 		if (copy_to_user(req->data, reg, req->size)) {
1152 			mpp_err("copy_to_user reg fail\n");
1153 			return -EIO;
1154 		}
1155 	}
1156 
1157 	mpp_debug_leave();
1158 
1159 	return 0;
1160 }
1161 
1162 static int rkvenc_free_task(struct mpp_session *session,
1163 			    struct mpp_task *mpp_task)
1164 {
1165 	struct rkvenc_task *task = to_rkvenc_task(mpp_task);
1166 
1167 	mpp_task_finalize(session, mpp_task);
1168 	rkvenc_free_class_msg(task);
1169 	kfree(task);
1170 
1171 	return 0;
1172 }
1173 
1174 static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
1175 {
1176 	switch (req->cmd) {
1177 	case MPP_CMD_SEND_CODEC_INFO: {
1178 		int i;
1179 		int cnt;
1180 		struct codec_info_elem elem;
1181 		struct rkvenc2_session_priv *priv;
1182 
1183 		if (!session || !session->priv) {
1184 			mpp_err("session info null\n");
1185 			return -EINVAL;
1186 		}
1187 		priv = session->priv;
1188 
1189 		cnt = req->size / sizeof(elem);
1190 		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
1191 		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
1192 		for (i = 0; i < cnt; i++) {
1193 			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
1194 				mpp_err("copy_from_user failed\n");
1195 				continue;
1196 			}
1197 			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
1198 			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
1199 				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
1200 				priv->codec_info[elem.type].flag = elem.flag;
1201 				priv->codec_info[elem.type].val = elem.data;
1202 			} else {
1203 				mpp_err("codec info invalid, type %d, flag %d\n",
1204 					elem.type, elem.flag);
1205 			}
1206 		}
1207 	} break;
1208 	default: {
1209 		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
1210 	} break;
1211 	}
1212 
1213 	return 0;
1214 }
1215 
1216 static int rkvenc_free_session(struct mpp_session *session)
1217 {
1218 	if (session && session->priv) {
1219 		kfree(session->priv);
1220 		session->priv = NULL;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 static int rkvenc_init_session(struct mpp_session *session)
1227 {
1228 	struct rkvenc2_session_priv *priv;
1229 
1230 	if (!session) {
1231 		mpp_err("session is null\n");
1232 		return -EINVAL;
1233 	}
1234 
1235 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1236 	if (!priv)
1237 		return -ENOMEM;
1238 
1239 	init_rwsem(&priv->rw_sem);
1240 	session->priv = priv;
1241 
1242 	return 0;
1243 }
1244 
1245 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
1246 static int rkvenc_procfs_remove(struct mpp_dev *mpp)
1247 {
1248 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1249 
1250 	if (enc->procfs) {
1251 		proc_remove(enc->procfs);
1252 		enc->procfs = NULL;
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
1259 {
1260 	int i;
1261 	struct rkvenc2_session_priv *priv = session->priv;
1262 
1263 	down_read(&priv->rw_sem);
1264 	/* item name */
1265 	seq_puts(seq, "------------------------------------------------------");
1266 	seq_puts(seq, "------------------------------------------------------\n");
1267 	seq_printf(seq, "|%8s|", (const char *)"session");
1268 	seq_printf(seq, "%8s|", (const char *)"device");
1269 	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
1270 		bool show = priv->codec_info[i].flag;
1271 
1272 		if (show)
1273 			seq_printf(seq, "%8s|", enc_info_item_name[i]);
1274 	}
1275 	seq_puts(seq, "\n");
1276 	/* item data */
1277 	seq_printf(seq, "|%8p|", session);
1278 	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
1279 	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
1280 		u32 flag = priv->codec_info[i].flag;
1281 
1282 		if (!flag)
1283 			continue;
1284 		if (flag == CODEC_INFO_FLAG_NUMBER) {
1285 			u32 data = priv->codec_info[i].val;
1286 
1287 			seq_printf(seq, "%8d|", data);
1288 		} else if (flag == CODEC_INFO_FLAG_STRING) {
1289 			const char *name = (const char *)&priv->codec_info[i].val;
1290 
1291 			seq_printf(seq, "%8s|", name);
1292 		} else {
1293 			seq_printf(seq, "%8s|", (const char *)"null");
1294 		}
1295 	}
1296 	seq_puts(seq, "\n");
1297 	up_read(&priv->rw_sem);
1298 
1299 	return 0;
1300 }
1301 
1302 static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
1303 {
1304 	struct mpp_session *session = NULL, *n;
1305 	struct mpp_dev *mpp = seq->private;
1306 
1307 	mutex_lock(&mpp->srv->session_lock);
1308 	list_for_each_entry_safe(session, n,
1309 				 &mpp->srv->session_list,
1310 				 session_link) {
1311 		if (session->device_type != MPP_DEVICE_RKVENC)
1312 			continue;
1313 		if (!session->priv)
1314 			continue;
1315 		if (mpp->dev_ops->dump_session)
1316 			mpp->dev_ops->dump_session(session, seq);
1317 	}
1318 	mutex_unlock(&mpp->srv->session_lock);
1319 
1320 	return 0;
1321 }
1322 
1323 static int rkvenc_procfs_init(struct mpp_dev *mpp)
1324 {
1325 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1326 	char name[32];
1327 
1328 	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
1329 	    !mpp->srv || !mpp->srv->procfs)
1330 		return -EINVAL;
1331 
1332 	snprintf(name, sizeof(name), "%s%d",
1333 		 mpp->dev->of_node->name, mpp->core_id);
1334 
1335 	enc->procfs = proc_mkdir(name, mpp->srv->procfs);
1336 	if (IS_ERR_OR_NULL(enc->procfs)) {
1337 		mpp_err("failed on open procfs\n");
1338 		enc->procfs = NULL;
1339 		return -EIO;
1340 	}
1341 	/* for debug */
1342 	mpp_procfs_create_u32("aclk", 0644,
1343 			      enc->procfs, &enc->aclk_info.debug_rate_hz);
1344 	mpp_procfs_create_u32("clk_core", 0644,
1345 			      enc->procfs, &enc->core_clk_info.debug_rate_hz);
1346 	mpp_procfs_create_u32("session_buffers", 0644,
1347 			      enc->procfs, &mpp->session_max_buffers);
1348 	/* for show session info */
1349 	proc_create_single_data("sessions-info", 0444,
1350 				enc->procfs, rkvenc_show_session_info, mpp);
1351 
1352 	return 0;
1353 }
1354 
1355 static int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
1356 {
1357 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1358 
1359 	if (!enc->procfs)
1360 		goto done;
1361 
1362 	mpp_procfs_create_u32("disable_work", 0644,
1363 			      enc->procfs, &enc->disable_work);
1364 done:
1365 	return 0;
1366 }
1367 #else
1368 static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
1369 {
1370 	return 0;
1371 }
1372 
1373 static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
1374 {
1375 	return 0;
1376 }
1377 
1378 static inline int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
1379 {
1380 	return 0;
1381 }
1382 #endif
1383 
1384 static int rkvenc_init(struct mpp_dev *mpp)
1385 {
1386 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1387 	int ret = 0;
1388 
1389 	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];
1390 
1391 	/* Get clock info from dtsi */
1392 	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
1393 	if (ret)
1394 		mpp_err("failed on clk_get aclk_vcodec\n");
1395 	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
1396 	if (ret)
1397 		mpp_err("failed on clk_get hclk_vcodec\n");
1398 	ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
1399 	if (ret)
1400 		mpp_err("failed on clk_get clk_core\n");
1401 	/* Get normal max workload from dtsi */
1402 	of_property_read_u32(mpp->dev->of_node,
1403 			     "rockchip,default-max-load",
1404 			     &enc->default_max_load);
1405 	/* Set default rates */
1406 	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
1407 	mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);
1408 
1409 	/* Get reset control from dtsi */
1410 	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
1411 	if (!enc->rst_a)
1412 		mpp_err("No aclk reset resource defined\n");
1413 	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
1414 	if (!enc->rst_h)
1415 		mpp_err("No hclk reset resource defined\n");
1416 	enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
1417 	if (!enc->rst_core)
1418 		mpp_err("No core reset resource defined\n");
1419 
1420 	return 0;
1421 }
1422 
1423 static int rkvenc_reset(struct mpp_dev *mpp)
1424 {
1425 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1426 	struct rkvenc_hw_info *hw = enc->hw_info;
1427 	struct mpp_taskqueue *queue = mpp->queue;
1428 
1429 	mpp_debug_enter();
1430 
1431 	/* safe reset */
1432 	mpp_write(mpp, hw->int_mask_base, 0x3FF);
1433 	mpp_write(mpp, hw->enc_clr_base, 0x1);
1434 	udelay(5);
1435 	mpp_write(mpp, hw->int_clr_base, 0xffffffff);
1436 	mpp_write(mpp, hw->int_sta_base, 0);
1437 
1438 	/* cru reset */
1439 	if (enc->rst_a && enc->rst_h && enc->rst_core) {
1440 		mpp_pmu_idle_request(mpp, true);
1441 		mpp_safe_reset(enc->rst_a);
1442 		mpp_safe_reset(enc->rst_h);
1443 		mpp_safe_reset(enc->rst_core);
1444 		udelay(5);
1445 		mpp_safe_unreset(enc->rst_a);
1446 		mpp_safe_unreset(enc->rst_h);
1447 		mpp_safe_unreset(enc->rst_core);
1448 		mpp_pmu_idle_request(mpp, false);
1449 	}
1450 
1451 	set_bit(mpp->core_id, &queue->core_idle);
1452 	if (enc->ccu)
1453 		enc->ccu->dchs[mpp->core_id].val = 0;
1454 
1455 	mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, queue->core_idle);
1456 
1457 	mpp_debug_leave();
1458 
1459 	return 0;
1460 }
1461 
1462 static int rkvenc_clk_on(struct mpp_dev *mpp)
1463 {
1464 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1465 
1466 	mpp_clk_safe_enable(enc->aclk_info.clk);
1467 	mpp_clk_safe_enable(enc->hclk_info.clk);
1468 	mpp_clk_safe_enable(enc->core_clk_info.clk);
1469 
1470 	return 0;
1471 }
1472 
1473 static int rkvenc_clk_off(struct mpp_dev *mpp)
1474 {
1475 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1476 
1477 	clk_disable_unprepare(enc->aclk_info.clk);
1478 	clk_disable_unprepare(enc->hclk_info.clk);
1479 	clk_disable_unprepare(enc->core_clk_info.clk);
1480 
1481 	return 0;
1482 }
1483 
1484 static int rkvenc_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1485 {
1486 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1487 	struct rkvenc_task *task = to_rkvenc_task(mpp_task);
1488 
1489 	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);
1490 	mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);
1491 
1492 	return 0;
1493 }
1494 
1495 #define RKVENC2_WORK_TIMEOUT_DELAY		(200)
1496 #define RKVENC2_WAIT_TIMEOUT_DELAY		(2000)
1497 
1498 static void rkvenc2_task_pop_pending(struct mpp_task *task)
1499 {
1500 	struct mpp_session *session = task->session;
1501 
1502 	mutex_lock(&session->pending_lock);
1503 	list_del_init(&task->pending_link);
1504 	mutex_unlock(&session->pending_lock);
1505 
1506 	kref_put(&task->ref, mpp_free_task);
1507 }
1508 
1509 static int rkvenc2_task_default_process(struct mpp_dev *mpp,
1510 					struct mpp_task *task)
1511 {
1512 	int ret = 0;
1513 
1514 	if (mpp->dev_ops && mpp->dev_ops->result)
1515 		ret = mpp->dev_ops->result(mpp, task, NULL);
1516 
1517 	mpp_debug_func(DEBUG_TASK_INFO, "kref_read %d, ret %d\n",
1518 			kref_read(&task->ref), ret);
1519 
1520 	rkvenc2_task_pop_pending(task);
1521 
1522 	return ret;
1523 }
1524 
1525 static void rkvenc2_task_timeout_process(struct mpp_session *session,
1526 					 struct mpp_task *task)
1527 {
1528 	atomic_inc(&task->abort_request);
1529 	set_bit(TASK_STATE_ABORT, &task->state);
1530 
1531 	mpp_err("session %d:%d count %d task %d ref %d timeout\n",
1532 		session->pid, session->index, atomic_read(&session->task_count),
1533 		task->task_id, kref_read(&task->ref));
1534 
1535 	rkvenc2_task_pop_pending(task);
1536 }
1537 
1538 static int rkvenc2_wait_result(struct mpp_session *session,
1539 			       struct mpp_task_msgs *msgs)
1540 {
1541 	struct rkvenc_poll_slice_cfg cfg;
1542 	struct rkvenc_task *enc_task;
1543 	struct mpp_request *req;
1544 	struct mpp_task *task;
1545 	struct mpp_dev *mpp;
1546 	u32 slice_len = 0;
1547 	u32 task_id;
1548 	int ret = 0;
1549 
1550 	mutex_lock(&session->pending_lock);
1551 	task = list_first_entry_or_null(&session->pending_list,
1552 					struct mpp_task,
1553 					pending_link);
1554 	mutex_unlock(&session->pending_lock);
1555 	if (!task) {
1556 		mpp_err("session %p pending list is empty!\n", session);
1557 		return -EIO;
1558 	}
1559 
1560 	mpp = mpp_get_task_used_device(task, session);
1561 	enc_task = to_rkvenc_task(task);
1562 	task_id = task->task_id;
1563 
1564 	req = cmpxchg(&msgs->poll_req, msgs->poll_req, NULL);
1565 
1566 	if (!enc_task->task_split || enc_task->task_split_done) {
1567 task_done_ret:
1568 		ret = wait_event_timeout(task->wait,
1569 					 test_bit(TASK_STATE_DONE, &task->state),
1570 					 msecs_to_jiffies(RKVENC2_WAIT_TIMEOUT_DELAY));
1571 
1572 		if (ret > 0)
1573 			return rkvenc2_task_default_process(mpp, task);
1574 
1575 		rkvenc2_task_timeout_process(session, task);
1576 		return ret;
1577 	}
1578 
1579 	/* no poll request: just drain the slice-length fifo until the last slice */
1580 	if (!req) {
1581 		do {
1582 			ret = wait_event_timeout(task->wait,
1583 						 kfifo_out(&enc_task->slice_len, &slice_len, 1),
1584 						 msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
1585 			if (ret > 0) {
1586 				mpp_dbg_slice("task %d skip slice len %d\n",
1587 					      task_id, slice_len);
1588 				if (slice_len == 0)
1589 					goto task_done_ret;
1590 
1591 				continue;
1592 			}
1593 
1594 			rkvenc2_task_timeout_process(session, task);
1595 			return ret;
1596 		} while (1);
1597 	}
1598 
1599 	if (copy_from_user(&cfg, req->data, sizeof(cfg))) {
1600 		mpp_err("copy_from_user failed\n");
1601 		return -EINVAL;
1602 	}
1603 
1604 	mpp_dbg_slice("task %d poll irq %d:%d\n", task->task_id,
1605 		      cfg.count_max, cfg.count_ret);
1606 	cfg.count_ret = 0;
1607 
1608 	/* handle slice mode poll return */
1609 	ret = wait_event_timeout(task->wait,
1610 				 kfifo_out(&enc_task->slice_len, &slice_len, 1),
1611 				 msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
1612 	if (ret > 0) {
1613 		mpp_dbg_slice("task %d rd len %d\n", task_id, slice_len);
1614 
1615 		if (cfg.count_ret < cfg.count_max) {
1616 			struct rkvenc_poll_slice_cfg __user *ucfg =
1617 				(struct rkvenc_poll_slice_cfg __user *)(req->data);
1618 			u32 __user *dst = (u32 __user *)(ucfg + 1);
1619 
1620 			/* Do NOT return here on put_user error. Just continue */
1621 			if (put_user(slice_len, dst + cfg.count_ret))
1622 				ret = -EFAULT;
1623 
1624 			cfg.count_ret++;
1625 			if (put_user(cfg.count_ret, &ucfg->count_ret))
1626 				ret = -EFAULT;
1627 		}
1628 
1629 		if (!slice_len) {
1630 			enc_task->task_split_done = 1;
1631 			goto task_done_ret;
1632 		}
1633 
1634 		return ret < 0 ? ret : 0;
1635 	}
1636 
1637 	rkvenc2_task_timeout_process(session, task);
1638 
1639 	return ret;
1640 }
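/*
 * Illustrative sketch (userspace, hypothetical): the slice poll
 * protocol as seen from the other side of rkvenc2_wait_result().
 * mpp_send_poll_request() stands in for whatever wrapper submits the
 * MPP poll request carrying the cfg buffer; it is not a real API of
 * this driver.
 */
#if 0
static int example_poll_slices(int max)
{
	struct rkvenc_poll_slice_cfg *cfg;
	int i, done = 0;

	cfg = malloc(sizeof(*cfg) + max * sizeof(s32));
	cfg->count_max = max;

	while (!done) {
		cfg->count_ret = 0;
		if (mpp_send_poll_request(cfg))	/* hypothetical wrapper */
			break;
		for (i = 0; i < cfg->count_ret; i++) {
			if (!cfg->slice_len[i])
				done = 1;	/* zero length: last slice */
			/* otherwise consume slice_len[i] stream bytes */
		}
	}
	free(cfg);

	return 0;
}
#endif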
1641 
1642 static struct mpp_hw_ops rkvenc_hw_ops = {
1643 	.init = rkvenc_init,
1644 	.clk_on = rkvenc_clk_on,
1645 	.clk_off = rkvenc_clk_off,
1646 	.set_freq = rkvenc_set_freq,
1647 	.reset = rkvenc_reset,
1648 };
1649 
1650 static struct mpp_dev_ops rkvenc_dev_ops_v2 = {
1651 	.wait_result = rkvenc2_wait_result,
1652 	.alloc_task = rkvenc_alloc_task,
1653 	.run = rkvenc_run,
1654 	.irq = rkvenc_irq,
1655 	.isr = rkvenc_isr,
1656 	.finish = rkvenc_finish,
1657 	.result = rkvenc_result,
1658 	.free_task = rkvenc_free_task,
1659 	.ioctl = rkvenc_control,
1660 	.init_session = rkvenc_init_session,
1661 	.free_session = rkvenc_free_session,
1662 	.dump_session = rkvenc_dump_session,
1663 };
1664 
1665 static struct mpp_dev_ops rkvenc_ccu_dev_ops = {
1666 	.wait_result = rkvenc2_wait_result,
1667 	.alloc_task = rkvenc_alloc_task,
1668 	.prepare = rkvenc2_prepare,
1669 	.run = rkvenc_run,
1670 	.irq = rkvenc_irq,
1671 	.isr = rkvenc_isr,
1672 	.finish = rkvenc_finish,
1673 	.result = rkvenc_result,
1674 	.free_task = rkvenc_free_task,
1675 	.ioctl = rkvenc_control,
1676 	.init_session = rkvenc_init_session,
1677 	.free_session = rkvenc_free_session,
1678 	.dump_session = rkvenc_dump_session,
1679 };
1680 
1681 
1682 static const struct mpp_dev_var rkvenc_v2_data = {
1683 	.device_type = MPP_DEVICE_RKVENC,
1684 	.hw_info = &rkvenc_v2_hw_info.hw,
1685 	.trans_info = trans_rkvenc_v2,
1686 	.hw_ops = &rkvenc_hw_ops,
1687 	.dev_ops = &rkvenc_dev_ops_v2,
1688 };
1689 
1690 static const struct mpp_dev_var rkvenc_ccu_data = {
1691 	.device_type = MPP_DEVICE_RKVENC,
1692 	.hw_info = &rkvenc_v2_hw_info.hw,
1693 	.trans_info = trans_rkvenc_v2,
1694 	.hw_ops = &rkvenc_hw_ops,
1695 	.dev_ops = &rkvenc_ccu_dev_ops,
1696 };
1697 
1698 static const struct of_device_id mpp_rkvenc_dt_match[] = {
1699 	{
1700 		.compatible = "rockchip,rkv-encoder-v2",
1701 		.data = &rkvenc_v2_data,
1702 	},
1703 #ifdef CONFIG_CPU_RK3588
1704 	{
1705 		.compatible = "rockchip,rkv-encoder-v2-core",
1706 		.data = &rkvenc_ccu_data,
1707 	},
1708 	{
1709 		.compatible = "rockchip,rkv-encoder-v2-ccu",
1710 	},
1711 #endif
1712 	{},
1713 };
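/*
 * Illustrative device-tree fragment (assumed layout, not authoritative)
 * for the RK3588 multi-core case: core nodes reference the ccu node
 * through the "rockchip,ccu" phandle parsed in rkvenc_attach_ccu(),
 * and core ids come from "rkvenc" aliases via of_alias_get_id() in
 * rkvenc_core_probe().
 *
 *	rkvenc_ccu: rkvenc-ccu {
 *		compatible = "rockchip,rkv-encoder-v2-ccu";
 *	};
 *
 *	rkvenc_core0: rkvenc-core@fdbd0000 {
 *		compatible = "rockchip,rkv-encoder-v2-core";
 *		rockchip,ccu = <&rkvenc_ccu>;
 *	};
 */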
1714 
1715 static int rkvenc_ccu_probe(struct platform_device *pdev)
1716 {
1717 	struct rkvenc_ccu *ccu;
1718 	struct device *dev = &pdev->dev;
1719 
1720 	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
1721 	if (!ccu)
1722 		return -ENOMEM;
1723 
1724 	platform_set_drvdata(pdev, ccu);
1725 
1726 	mutex_init(&ccu->lock);
1727 	INIT_LIST_HEAD(&ccu->core_list);
1728 	spin_lock_init(&ccu->lock_dchs);
1729 
1730 	return 0;
1731 }
1732 
1733 static int rkvenc_attach_ccu(struct device *dev, struct rkvenc_dev *enc)
1734 {
1735 	struct device_node *np;
1736 	struct platform_device *pdev;
1737 	struct rkvenc_ccu *ccu;
1738 
1739 	mpp_debug_enter();
1740 
1741 	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1742 	if (!np || !of_device_is_available(np))
1743 		return -ENODEV;
1744 
1745 	pdev = of_find_device_by_node(np);
1746 	of_node_put(np);
1747 	if (!pdev)
1748 		return -ENODEV;
1749 
1750 	ccu = platform_get_drvdata(pdev);
1751 	if (!ccu)
1752 		return -ENOMEM;
1753 
1754 	INIT_LIST_HEAD(&enc->core_link);
1755 	mutex_lock(&ccu->lock);
1756 	ccu->core_num++;
1757 	list_add_tail(&enc->core_link, &ccu->core_list);
1758 	mutex_unlock(&ccu->lock);
1759 
1760 	/* attach the ccu-domain to current core */
1761 	if (!ccu->main_core) {
1762 		/*
1763 		 * Set the first device as the main core; the main
1764 		 * core's iommu domain then serves as the ccu domain.
1765 		 */
1766 		ccu->main_core = &enc->mpp;
1767 	} else {
1768 		struct mpp_iommu_info *ccu_info, *cur_info;
1769 
1770 		/* set the ccu-domain for current device */
1771 		ccu_info = ccu->main_core->iommu_info;
1772 		cur_info = enc->mpp.iommu_info;
1773 
1774 		cur_info->domain = ccu_info->domain;
1775 		cur_info->rw_sem = ccu_info->rw_sem;
1776 		mpp_iommu_attach(cur_info);
1777 
1778 		/* increase main core message capacity */
1779 		ccu->main_core->msgs_cap++;
1780 		enc->mpp.msgs_cap = 0;
1781 	}
1782 	enc->ccu = ccu;
1783 
1784 	dev_info(dev, "attach ccu as core %d\n", enc->mpp.core_id);
1785 	mpp_debug_leave();
1786 
1787 	return 0;
1788 }
1789 
1790 static int rkvenc2_alloc_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
1791 {
1792 	int ret;
1793 	u32 vals[2];
1794 	dma_addr_t iova;
1795 	u32 sram_used, sram_size;
1796 	struct device_node *sram_np;
1797 	struct resource sram_res;
1798 	resource_size_t sram_start, sram_end;
1799 	struct iommu_domain *domain;
1800 	struct device *dev = &pdev->dev;
1801 
1802 	/* get rcb iova start and size */
1803 	ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2);
1804 	if (ret)
1805 		return ret;
1806 
1807 	iova = PAGE_ALIGN(vals[0]);
1808 	sram_used = PAGE_ALIGN(vals[1]);
1809 	if (!sram_used) {
1810 		dev_err(dev, "sram rcb invalid.\n");
1811 		return -EINVAL;
1812 	}
1813 	/* alloc reserve iova for rcb */
1814 	ret = iommu_dma_reserve_iova(dev, iova, sram_used);
1815 	if (ret) {
1816 		dev_err(dev, "alloc rcb iova error.\n");
1817 		return ret;
1818 	}
1819 	/* get sram device node */
1820 	sram_np = of_parse_phandle(dev->of_node, "rockchip,sram", 0);
1821 	if (!sram_np) {
1822 		dev_err(dev, "could not find phandle sram\n");
1823 		return -ENODEV;
1824 	}
1825 	/* get sram start and size */
1826 	ret = of_address_to_resource(sram_np, 0, &sram_res);
1827 	of_node_put(sram_np);
1828 	if (ret) {
1829 		dev_err(dev, "find sram res error\n");
1830 		return ret;
1831 	}
1832 	/* check that sram start and size are PAGE_SIZE aligned */
1833 	sram_start = round_up(sram_res.start, PAGE_SIZE);
1834 	sram_end = round_down(sram_res.start + resource_size(&sram_res), PAGE_SIZE);
1835 	if (sram_end <= sram_start) {
1836 		dev_err(dev, "no available sram, phy_start %pa, phy_end %pa\n",
1837 			&sram_start, &sram_end);
1838 		return -ENOMEM;
1839 	}
1840 	sram_size = sram_end - sram_start;
1841 	sram_size = sram_used < sram_size ? sram_used : sram_size;
1842 	/* iova map to sram */
1843 	domain = enc->mpp.iommu_info->domain;
1844 	ret = iommu_map(domain, iova, sram_start, sram_size, IOMMU_READ | IOMMU_WRITE);
1845 	if (ret) {
1846 		dev_err(dev, "sram iommu_map error.\n");
1847 		return ret;
1848 	}
1849 	/* alloc dma for the remaining buffer, sram + dma */
1850 	if (sram_size < sram_used) {
1851 		struct page *page;
1852 		size_t page_size = PAGE_ALIGN(sram_used - sram_size);
1853 
1854 		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(page_size));
1855 		if (!page) {
1856 			dev_err(dev, "unable to allocate pages\n");
1857 			ret = -ENOMEM;
1858 			goto err_sram_map;
1859 		}
1860 		/* iova map to dma */
1861 		ret = iommu_map(domain, iova + sram_size, page_to_phys(page),
1862 				page_size, IOMMU_READ | IOMMU_WRITE);
1863 		if (ret) {
1864 			dev_err(dev, "page iommu_map error.\n");
1865 			__free_pages(page, get_order(page_size));
1866 			goto err_sram_map;
1867 		}
1868 		enc->rcb_page = page;
1869 	}
1870 
1871 	enc->sram_size = sram_size;
1872 	enc->sram_used = sram_used;
1873 	enc->sram_iova = iova;
1874 	enc->sram_enabled = -1;
1875 	dev_info(dev, "sram_start %pa\n", &sram_start);
1876 	dev_info(dev, "sram_iova %pad\n", &enc->sram_iova);
1877 	dev_info(dev, "sram_size %u\n", enc->sram_size);
1878 	dev_info(dev, "sram_used %u\n", enc->sram_used);
1879 
1880 	return 0;
1881 
1882 err_sram_map:
1883 	iommu_unmap(domain, iova, sram_size);
1884 
1885 	return ret;
1886 }
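/*
 * Illustrative device-tree fragment (values made up) for the two
 * properties consumed above: the first cell of "rockchip,rcb-iova" is
 * the reserved iova start and the second its size, while
 * "rockchip,sram" points at the SRAM node whose physical range backs
 * the low part of that window, with plain pages covering any
 * remainder.
 *
 *	rockchip,rcb-iova = <0x10000000 0x10000>;
 *	rockchip,sram = <&rkvenc_sram>;
 */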
1887 
1888 static int rkvenc2_iommu_fault_handle(struct iommu_domain *iommu,
1889 				      struct device *iommu_dev,
1890 				      unsigned long iova, int status, void *arg)
1891 {
1892 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
1893 	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
1894 	struct mpp_task *mpp_task = mpp->cur_task;
1895 
1896 	dev_info(mpp->dev, "core %d page fault found dchs %08x\n",
1897 		 mpp->core_id, mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET));
1898 
1899 	if (mpp_task)
1900 		mpp_task_dump_mem_region(mpp, mpp_task);
1901 
1902 	return 0;
1903 }
1904 
1905 static int rkvenc_core_probe(struct platform_device *pdev)
1906 {
1907 	int ret = 0;
1908 	struct device *dev = &pdev->dev;
1909 	struct rkvenc_dev *enc = NULL;
1910 	struct mpp_dev *mpp = NULL;
1911 
1912 	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
1913 	if (!enc)
1914 		return -ENOMEM;
1915 
1916 	mpp = &enc->mpp;
1917 	platform_set_drvdata(pdev, mpp);
1918 
1919 	if (pdev->dev.of_node) {
1920 		struct device_node *np = pdev->dev.of_node;
1921 		const struct of_device_id *match = NULL;
1922 
1923 		match = of_match_node(mpp_rkvenc_dt_match, np);
1924 		if (match)
1925 			mpp->var = (struct mpp_dev_var *)match->data;
1926 
1927 		mpp->core_id = of_alias_get_id(np, "rkvenc");
1928 	}
1929 
1930 	ret = mpp_dev_probe(mpp, pdev);
1931 	if (ret)
1932 		return ret;
1933 
1934 	rkvenc2_alloc_rcbbuf(pdev, enc);
1935 
1936 	/* attach core to ccu */
1937 	ret = rkvenc_attach_ccu(dev, enc);
1938 	if (ret) {
1939 		dev_err(dev, "attach ccu failed\n");
1940 		return ret;
1941 	}
1942 
1943 	ret = devm_request_threaded_irq(dev, mpp->irq,
1944 					mpp_dev_irq,
1945 					mpp_dev_isr_sched,
1946 					IRQF_SHARED,
1947 					dev_name(dev), mpp);
1948 	if (ret) {
1949 		dev_err(dev, "failed to register interrupt handler\n");
1950 		return -EINVAL;
1951 	}
1952 	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
1953 	enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
1954 	mpp->iommu_info->hdl = rkvenc2_iommu_fault_handle;
1955 	rkvenc_procfs_init(mpp);
1956 	rkvenc_procfs_ccu_init(mpp);
1957 
1958 	/* if current is main-core, register current device to mpp service */
1959 	if (mpp == enc->ccu->main_core)
1960 		mpp_dev_register_srv(mpp, mpp->srv);
1961 
1962 	return 0;
1963 }
1964 
1965 static int rkvenc_probe_default(struct platform_device *pdev)
1966 {
1967 	int ret = 0;
1968 	struct device *dev = &pdev->dev;
1969 	struct rkvenc_dev *enc = NULL;
1970 	struct mpp_dev *mpp = NULL;
1971 	const struct of_device_id *match = NULL;
1972 
1973 	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
1974 	if (!enc)
1975 		return -ENOMEM;
1976 
1977 	mpp = &enc->mpp;
1978 	platform_set_drvdata(pdev, mpp);
1979 
1980 	if (pdev->dev.of_node) {
1981 		match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
1982 		if (match)
1983 			mpp->var = (struct mpp_dev_var *)match->data;
1984 	}
1985 
1986 	ret = mpp_dev_probe(mpp, pdev);
1987 	if (ret)
1988 		return ret;
1989 
1990 	rkvenc2_alloc_rcbbuf(pdev, enc);
1991 
1992 	ret = devm_request_threaded_irq(dev, mpp->irq,
1993 					mpp_dev_irq,
1994 					mpp_dev_isr_sched,
1995 					IRQF_SHARED,
1996 					dev_name(dev), mpp);
1997 	if (ret) {
1998 		dev_err(dev, "failed to register interrupt handler\n");
1999 		goto failed_get_irq;
2000 	}
2001 	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
2002 	enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
2003 	rkvenc_procfs_init(mpp);
2004 	mpp_dev_register_srv(mpp, mpp->srv);
2005 
2006 	return 0;
2007 
2008 failed_get_irq:
2009 	mpp_dev_remove(mpp);
2010 
2011 	return ret;
2012 }
2013 
2014 static int rkvenc_probe(struct platform_device *pdev)
2015 {
2016 	int ret = 0;
2017 	struct device *dev = &pdev->dev;
2018 	struct device_node *np = dev->of_node;
2019 
2020 	dev_info(dev, "probing start\n");
2021 
2022 	if (strstr(np->name, "ccu"))
2023 		ret = rkvenc_ccu_probe(pdev);
2024 	else if (strstr(np->name, "core"))
2025 		ret = rkvenc_core_probe(pdev);
2026 	else
2027 		ret = rkvenc_probe_default(pdev);
2028 
2029 	dev_info(dev, "probing finish\n");
2030 
2031 	return ret;
2032 }
2033 
2034 static int rkvenc2_free_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
2035 {
2036 	struct iommu_domain *domain;
2037 
2038 	if (enc->rcb_page) {
2039 		size_t page_size = PAGE_ALIGN(enc->sram_used - enc->sram_size);
2040 
2041 		__free_pages(enc->rcb_page, get_order(page_size));
2042 	}
2043 	if (enc->sram_iova) {
2044 		domain = enc->mpp.iommu_info->domain;
2045 		iommu_unmap(domain, enc->sram_iova, enc->sram_used);
2046 	}
2047 
2048 	return 0;
2049 }
2050 
2051 static int rkvenc_remove(struct platform_device *pdev)
2052 {
2053 	struct device *dev = &pdev->dev;
2054 	struct device_node *np = dev->of_node;
2055 
2056 	if (strstr(np->name, "ccu")) {
2057 		dev_info(dev, "remove ccu\n");
2058 	} else if (strstr(np->name, "core")) {
2059 		struct mpp_dev *mpp = dev_get_drvdata(dev);
2060 		struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
2061 
2062 		dev_info(dev, "remove core\n");
2063 		if (enc->ccu) {
2064 			mutex_lock(&enc->ccu->lock);
2065 			list_del_init(&enc->core_link);
2066 			enc->ccu->core_num--;
2067 			mutex_unlock(&enc->ccu->lock);
2068 		}
2069 		rkvenc2_free_rcbbuf(pdev, enc);
2070 		mpp_dev_remove(&enc->mpp);
2071 		rkvenc_procfs_remove(&enc->mpp);
2072 	} else {
2073 		struct mpp_dev *mpp = dev_get_drvdata(dev);
2074 		struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
2075 
2076 		dev_info(dev, "remove device\n");
2077 		rkvenc2_free_rcbbuf(pdev, enc);
2078 		mpp_dev_remove(mpp);
2079 		rkvenc_procfs_remove(mpp);
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 static void rkvenc_shutdown(struct platform_device *pdev)
2086 {
2087 	struct device *dev = &pdev->dev;
2088 
2089 	if (!strstr(dev_name(dev), "ccu"))
2090 		mpp_dev_shutdown(pdev);
2091 }
2092 
2093 struct platform_driver rockchip_rkvenc2_driver = {
2094 	.probe = rkvenc_probe,
2095 	.remove = rkvenc_remove,
2096 	.shutdown = rkvenc_shutdown,
2097 	.driver = {
2098 		.name = RKVENC_DRIVER_NAME,
2099 		.of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
2100 	},
2101 };
2102