// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */

#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/proc_fs.h>
#include <linux/nospec.h>
#include <linux/workqueue.h>
#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_ipa.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#ifdef CONFIG_PM_DEVFREQ
#include "../../../devfreq/governor.h"
#endif

#include "mpp_debug.h"
#include "mpp_iommu.h"
#include "mpp_common.h"

#define RKVENC_DRIVER_NAME		"mpp_rkvenc"
#define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
#define IOMMU_PAGE_SIZE			SZ_4K
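/*
 * Note: IOMMU_GET_BUS_ID() pulls bits [10:6] out of the pagefault status
 * word reported by the IOMMU; a non-zero bus id is what triggers the
 * dummy-page workaround in rkvenc_iommu_fault_handle() below.
 */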

#define RKVENC_SESSION_MAX_BUFFERS	40
/* the maximum register count across all hardware versions */
#define RKVENC_REG_L1_NUM		780
#define RKVENC_REG_L2_NUM		320
#define RKVENC_REG_START_INDEX		0
#define RKVENC_REG_END_INDEX		131
/* rkvenc register info */
#define RKVENC_REG_NUM			112
#define RKVENC_REG_HW_ID_INDEX		0
#define RKVENC_REG_CLR_CACHE_BASE	0x884

#define RKVENC_ENC_START_INDEX		1
#define RKVENC_ENC_START_BASE		0x004
#define RKVENC_LKT_NUM(x)		((x) & 0xff)
#define RKVENC_CMD(x)			(((x) & 0x3) << 8)
#define RKVENC_CLK_GATE_EN		BIT(16)
#define RKVENC_CLR_BASE			0x008
#define RKVENC_SAFE_CLR_BIT		BIT(0)
#define RKVENC_FORCE_CLR_BIT		BIT(1)
#define RKVENC_LKT_ADDR_BASE		0x00c

#define RKVENC_INT_EN_INDEX		4
#define RKVENC_INT_EN_BASE		0x010
#define RKVENC_INT_MSK_BASE		0x014
#define RKVENC_INT_CLR_BASE		0x018
#define RKVENC_INT_STATUS_INDEX		7
#define RKVENC_INT_STATUS_BASE		0x01c
/* interrupt bits, shared by the int en/msk/clr/status registers */
#define RKVENC_BIT_ONE_FRAME		BIT(0)
#define RKVENC_BIT_LINK_TABLE		BIT(1)
#define RKVENC_BIT_SAFE_CLEAR		BIT(2)
#define RKVENC_BIT_ONE_SLICE		BIT(3)
#define RKVENC_BIT_STREAM_OVERFLOW	BIT(4)
#define RKVENC_BIT_AXI_WRITE_FIFO_FULL	BIT(5)
#define RKVENC_BIT_AXI_WRITE_CHANNEL	BIT(6)
#define RKVENC_BIT_AXI_READ_CHANNEL	BIT(7)
#define RKVENC_BIT_TIMEOUT		BIT(8)
#define RKVENC_INT_ERROR_BITS		((RKVENC_BIT_STREAM_OVERFLOW) |\
					(RKVENC_BIT_AXI_WRITE_FIFO_FULL) |\
					(RKVENC_BIT_AXI_WRITE_CHANNEL) |\
					(RKVENC_BIT_AXI_READ_CHANNEL) |\
					(RKVENC_BIT_TIMEOUT))
#define RKVENC_ENC_RSL_INDEX		12
#define RKVENC_ENC_PIC_INDEX		13
#define RKVENC_ENC_PIC_BASE		0x034
#define RKVENC_GET_FORMAT(x)		((x) & 0x1)
#define RKVENC_ENC_PIC_NODE_INT_EN	BIT(31)
#define RKVENC_ENC_WDG_BASE		0x038
#define RKVENC_PPLN_ENC_LMT(x)		((x) & 0xf)
#define RKVENC_OSD_CFG_BASE		0x1c0
#define RKVENC_OSD_PLT_TYPE		BIT(17)
#define RKVENC_OSD_CLK_SEL_BIT		BIT(16)
#define RKVENC_STATUS_BASE(i)		(0x210 + (4 * (i)))
#define RKVENC_BSL_STATUS_BASE		0x210
#define RKVENC_BITSTREAM_LENGTH(x)	((x) & 0x7FFFFFF)
#define RKVENC_ENC_STATUS_BASE		0x220
#define RKVENC_ENC_STATUS_ENC(x)	(((x) >> 0) & 0x3)
#define RKVENC_LKT_STATUS_BASE		0x224
#define RKVENC_LKT_STATUS_FNUM_ENC(x)	(((x) >> 0) & 0xff)
#define RKVENC_LKT_STATUS_FNUM_CFG(x)	(((x) >> 8) & 0xff)
#define RKVENC_LKT_STATUS_FNUM_INT(x)	(((x) >> 16) & 0xff)
#define RKVENC_OSD_PLT_BASE(i)		(0x400 + (4 * (i)))

#define RKVENC_L2_OFFSET		(0x10000)
#define RKVENC_L2_ADDR_BASE		(0x3f0)
#define RKVENC_L2_WRITE_BASE		(0x3f4)
#define RKVENC_L2_READ_BASE		(0x3f8)
#define RKVENC_L2_BURST_TYPE		BIT(0)
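/*
 * Note: the L2 register file is not directly memory-mapped. Userspace
 * addresses it at RKVENC_L2_OFFSET (64 KiB) above the L1 block, and the
 * driver splits incoming requests on that boundary (see
 * rkvenc_extract_task_msg()); the actual hardware access goes through the
 * RKVENC_L2_ADDR/WRITE/READ window registers in L1 space.
 */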

#define RKVENC_GET_WIDTH(x)		((((x) & 0x1ff) + 1) << 3)
#define RKVENC_GET_HEIGHT(x)		(((((x) >> 16) & 0x1ff) + 1) << 3)
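/*
 * Worked example (illustrative): both fields are stored in units of eight
 * pixels, minus one. For a 1920x1080 frame the resolution register holds
 * width field 1920 / 8 - 1 = 239 (0x0ef) in bits [8:0] and height field
 * 1080 / 8 - 1 = 134 (0x086) in bits [24:16], i.e. 0x008600ef, which the
 * macros above decode back to 1920 and 1080.
 */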

#define to_rkvenc_task(ctx)		\
		container_of(ctx, struct rkvenc_task, mpp_task)
#define to_rkvenc_dev(dev)		\
		container_of(dev, struct rkvenc_dev, mpp)

enum rkvenc_format_type {
	RKVENC_FMT_H264E = 0,
	RKVENC_FMT_H265E = 1,
	RKVENC_FMT_BUTT,
};

enum RKVENC_MODE {
	RKVENC_MODE_NONE,
	RKVENC_MODE_ONEFRAME,
	RKVENC_MODE_LINKTABLE_FIX,
	RKVENC_MODE_LINKTABLE_UPDATE,
	RKVENC_MODE_BUTT
};
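/*
 * Note: only RKVENC_MODE_ONEFRAME is wired up in this driver; the linktable
 * modes fall through to the error branch in rkvenc_run(), rkvenc_finish()
 * and rkvenc_result() below.
 */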

struct rkvenc_task {
	struct mpp_task mpp_task;

	int link_flags;
	int fmt;
	enum RKVENC_MODE link_mode;

	/* level 1 register setting */
	u32 reg_offset;
	u32 reg_num;
	u32 reg[RKVENC_REG_L1_NUM];
	u32 width;
	u32 height;
	u32 pixels;
	/* level 2 register setting */
	u32 reg_l2_offset;
	u32 reg_l2_num;
	u32 reg_l2[RKVENC_REG_L2_NUM];
	/* register offset info */
	struct reg_offset_info off_inf;

	enum MPP_CLOCK_MODE clk_mode;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct rkvenc_session_priv {
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
};

struct rkvenc_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_core;

#ifdef CONFIG_PM_DEVFREQ
	struct regulator *vdd;
	struct devfreq *devfreq;
	unsigned long volt;
	unsigned long core_rate_hz;
	unsigned long core_last_rate_hz;
	struct ipa_power_model_data *model_data;
	struct thermal_cooling_device *devfreq_cooling;
	struct monitor_dev_info *mdev_info;
#endif
	/* for iommu pagefault handle */
	struct work_struct iommu_work;
	struct workqueue_struct *iommu_wq;
	struct page *aux_page;
	unsigned long aux_iova;
	unsigned long fault_iova;
};

struct link_table_elem {
	dma_addr_t lkt_dma_addr;
	void *lkt_cpu_addr;
	u32 lkt_index;
	struct list_head list;
};
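/*
 * Note: link_table_elem describes one node of a hardware linktable (DMA
 * address, CPU mapping and node index, chained on a list). Nothing on the
 * one-frame path in this file references it; it only matters for the
 * linktable modes.
 */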

static struct mpp_hw_info rkvenc_hw_info = {
	.reg_num = RKVENC_REG_NUM,
	.reg_id = RKVENC_REG_HW_ID_INDEX,
	.reg_en = RKVENC_ENC_START_INDEX,
	.reg_start = RKVENC_REG_START_INDEX,
	.reg_end = RKVENC_REG_END_INDEX,
};
/*
 * file handle translation information
 */
static const u16 trans_tbl_h264e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131
};

static const u16 trans_tbl_h265e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131, 95, 96
};

static struct mpp_trans_info trans_rk_rkvenc[] = {
	[RKVENC_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_h264e),
		.table = trans_tbl_h264e,
	},
	[RKVENC_FMT_H265E] = {
		.count = ARRAY_SIZE(trans_tbl_h265e),
		.table = trans_tbl_h265e,
	},
};
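/*
 * Note: each table lists the register indices whose values are dma-buf fds
 * coming from userspace; mpp_translate_reg_address() in rkvenc_alloc_task()
 * swaps them for device (iova) addresses unless MPP_FLAGS_REG_FD_NO_TRANS
 * is set on the message.
 */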

static int rkvenc_extract_task_msg(struct rkvenc_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			int req_base;
			int max_size;
			u8 *dst = NULL;

			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
				dst = (u8 *)task->reg_l2;
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
				dst = (u8 *)task->reg;
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			dst += req->offset - req_base;
			if (copy_from_user(dst, req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *rkvenc_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvenc_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvenc_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	task->fmt = RKVENC_GET_FORMAT(task->reg[RKVENC_ENC_PIC_INDEX]);
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = mpp_translate_reg_address(session,
						mpp_task, task->fmt,
						task->reg, &task->off_inf);
		if (ret)
			goto fail;
		mpp_translate_reg_offset_info(mpp_task,
					      &task->off_inf, task->reg);
	}
	task->link_mode = RKVENC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;
	/* get resolution info */
	task->width = RKVENC_GET_WIDTH(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->height = RKVENC_GET_HEIGHT(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->pixels = task->width * task->height;
	mpp_debug(DEBUG_TASK_INFO, "width=%d, height=%d\n", task->width, task->height);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

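/*
 * L2 register access, a short sketch: the L2 file is reached through a
 * three-register window in L1 space rather than plain MMIO. A single L2
 * write is
 *
 *	writel_relaxed(byte_offset, mpp->reg_base + RKVENC_L2_ADDR_BASE);
 *	writel_relaxed(value, mpp->reg_base + RKVENC_L2_WRITE_BASE);
 *
 * and a read swaps the second store for a load from RKVENC_L2_READ_BASE.
 * The relaxed accessors are fine here since writes to the same device stay
 * ordered; rkvenc_run() issues a wmb() before the final start kick.
 */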
static int rkvenc_write_req_l2(struct mpp_dev *mpp,
			       u32 *regs,
			       u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		mpp_debug(DEBUG_SET_REG_L2, "reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
		writel_relaxed(reg, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		writel_relaxed(regs[i], mpp->reg_base + RKVENC_L2_WRITE_BASE);
	}

	return 0;
}

static int rkvenc_read_req_l2(struct mpp_dev *mpp,
			      u32 *regs,
			      u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		writel_relaxed(reg, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		regs[i] = readl_relaxed(mpp->reg_base + RKVENC_L2_READ_BASE);
		mpp_debug(DEBUG_GET_REG_L2, "reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
	}

	return 0;
}

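/*
 * Note: L1 registers are deliberately written from the highest index down,
 * skipping the enable register (en_idx); rkvenc_run() kicks
 * RKVENC_ENC_START_BASE separately, after a wmb(), so the start bit can
 * never reach the hardware before its configuration does.
 */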
static int rkvenc_write_req_backward(struct mpp_dev *mpp, u32 *regs,
				     s32 start_idx, s32 end_idx, s32 en_idx)
{
	int i;

	for (i = end_idx - 1; i >= start_idx; i--) {
		if (i == en_idx)
			continue;
		mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
	}

	return 0;
}

static int rkvenc_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, RKVENC_REG_CLR_CACHE_BASE, 1);
	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		int i;
		struct mpp_request *req;
		u32 reg_en = mpp_task->hw_info->reg_en;

		/*
		 * Note: the OSD palette clock must be 0 before the registers
		 * are written; otherwise the OSD configuration does not take
		 * effect.
		 */
		mpp_write_relaxed(mpp, RKVENC_OSD_CFG_BASE, 0);
		/* ensure the clear has completed */
		wmb();
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;

			req = &task->w_reqs[i];
			/* set register L2 */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_write_req_l2(mpp, task->reg_l2, s, e);
			} else {
				/* set register L1 */
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				/* NOTE: rkvenc registers must be written in reverse order */
				rkvenc_write_req_backward(mpp, task->reg, s, e, reg_en);
			}
		}
		/* init current task */
		mpp->cur_task = mpp_task;
		/* flush the registers before starting the device */
		wmb();
		mpp_write(mpp, RKVENC_ENC_START_BASE, task->reg[reg_en]);
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}

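/*
 * Interrupt handling is split in two: rkvenc_irq() runs in hard-IRQ context,
 * latches RKVENC_INT_STATUS, masks the timeout interrupt (0x100 is
 * RKVENC_BIT_TIMEOUT) and clears the status, then returns IRQ_WAKE_THREAD so
 * that rkvenc_isr() can finish the task in the threaded half.
 */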
static int rkvenc_irq(struct mpp_dev *mpp)
{
	mpp_debug_enter();

	mpp->irq_status = mpp_read(mpp, RKVENC_INT_STATUS_BASE);
	if (!mpp->irq_status)
		return IRQ_NONE;

	mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x100);
	mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
	mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);

	mpp_debug_leave();

	return IRQ_WAKE_THREAD;
}

static int rkvenc_isr(struct mpp_dev *mpp)
{
	struct rkvenc_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}

	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvenc_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

	if (task->irq_status & RKVENC_INT_ERROR_BITS) {
		atomic_inc(&mpp->reset_request);
		/* dump registers */
		mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n", task->irq_status);
		mpp_task_dump_hw_reg(mpp);
	}

	/* unmap the reserved auxiliary buffer */
	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int rkvenc_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			int s, e;

			req = &task->r_reqs[i];
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_read_req_l2(mpp, task->reg_l2, s, e);
			} else {
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				mpp_read_req(mpp, task->reg, s, e);
			}
		}
		task->reg[RKVENC_INT_STATUS_INDEX] = task->irq_status;
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}

static int rkvenc_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			/* copy back register L2 */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				if (copy_to_user(req->data,
						 (u8 *)task->reg_l2 + off,
						 req->size)) {
					mpp_err("copy_to_user reg_l2 fail\n");
					return -EIO;
				}
			} else {
				if (copy_to_user(req->data,
						 (u8 *)task->reg + req->offset,
						 req->size)) {
					mpp_err("copy_to_user reg fail\n");
					return -EIO;
				}
			}
		}
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	return 0;
}

static int rkvenc_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct rkvenc_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

static int rkvenc_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int rkvenc_init_session(struct mpp_session *session)
{
	struct rkvenc_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_rwsem(&priv->rw_sem);
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (enc->procfs) {
		proc_remove(enc->procfs);
		enc->procfs = NULL;
	}

	return 0;
}

static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	int i;
	struct rkvenc_session_priv *priv = session->priv;

	down_read(&priv->rw_sem);
	/* item name */
	seq_puts(seq, "------------------------------------------------------");
	seq_puts(seq, "------------------------------------------------------\n");
	seq_printf(seq, "|%8s|", (const char *)"session");
	seq_printf(seq, "%8s|", (const char *)"device");
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		bool show = priv->codec_info[i].flag;

		if (show)
			seq_printf(seq, "%8s|", enc_info_item_name[i]);
	}
	seq_puts(seq, "\n");
	/* item data */
	seq_printf(seq, "|%8p|", session);
	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		u32 flag = priv->codec_info[i].flag;

		if (!flag)
			continue;
		if (flag == CODEC_INFO_FLAG_NUMBER) {
			u32 data = priv->codec_info[i].val;

			seq_printf(seq, "%8d|", data);
		} else if (flag == CODEC_INFO_FLAG_STRING) {
			const char *name = (const char *)&priv->codec_info[i].val;

			seq_printf(seq, "%8s|", name);
		} else {
			seq_printf(seq, "%8s|", (const char *)"null");
		}
	}
	seq_puts(seq, "\n");
	up_read(&priv->rw_sem);

	return 0;
}

static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
{
	struct mpp_session *session = NULL, *n;
	struct mpp_dev *mpp = seq->private;

	mutex_lock(&mpp->srv->session_lock);
	list_for_each_entry_safe(session, n,
				 &mpp->srv->session_list,
				 session_link) {
		if (session->device_type != MPP_DEVICE_RKVENC)
			continue;
		if (!session->priv)
			continue;
		if (mpp->dev_ops->dump_session)
			mpp->dev_ops->dump_session(session, seq);
	}
	mutex_unlock(&mpp->srv->session_lock);

	return 0;
}

static int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	enc->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(enc->procfs)) {
		mpp_err("failed to create procfs dir\n");
		enc->procfs = NULL;
		return -EIO;
	}
	/* for debug */
	mpp_procfs_create_u32("aclk", 0644,
			      enc->procfs, &enc->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_core", 0644,
			      enc->procfs, &enc->core_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      enc->procfs, &mpp->session_max_buffers);
	/* for showing session info */
	proc_create_single_data("sessions-info", 0444,
				enc->procfs, rkvenc_show_session_info, mpp);

	return 0;
}
#else
static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	return 0;
}
#endif

#ifdef CONFIG_PM_DEVFREQ
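/*
 * Note on the devfreq target below: when raising the clock, the voltage is
 * increased first and the frequency second; when lowering it, the order is
 * reversed. Either way the rail is never undervolted for the rate it is
 * running at.
 */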
static int rkvenc_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	int ret = 0;

	struct rkvenc_dev *enc = dev_get_drvdata(dev);
	struct devfreq *devfreq = enc->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (old_clk_rate == target_freq) {
		enc->core_last_rate_hz = target_freq;
		if (enc->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		enc->volt = target_volt;
		return 0;
	}

	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	clk_set_rate(enc->core_clk_info.clk, target_freq);
	stat->current_frequency = target_freq;
	enc->core_last_rate_hz = target_freq;

	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}
	enc->volt = target_volt;

	return ret;
}

static int rkvenc_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	return 0;
}

static int rkvenc_devfreq_get_cur_freq(struct device *dev,
				       unsigned long *freq)
{
	struct rkvenc_dev *enc = dev_get_drvdata(dev);

	*freq = enc->core_last_rate_hz;

	return 0;
}

static struct devfreq_dev_profile rkvenc_devfreq_profile = {
	.target = rkvenc_devfreq_target,
	.get_dev_status = rkvenc_devfreq_get_dev_status,
	.get_cur_freq = rkvenc_devfreq_get_cur_freq,
};

static int devfreq_venc_ondemand_func(struct devfreq *df, unsigned long *freq)
{
	struct rkvenc_dev *enc = df->data;

	if (enc)
		*freq = enc->core_rate_hz;
	else
		*freq = df->previous_freq;

	return 0;
}

static int devfreq_venc_ondemand_handler(struct devfreq *devfreq,
					 unsigned int event, void *data)
{
	return 0;
}

static struct devfreq_governor devfreq_venc_ondemand = {
	.name = "venc_ondemand",
	.get_target_freq = devfreq_venc_ondemand_func,
	.event_handler = devfreq_venc_ondemand_handler,
};

static unsigned long rkvenc_get_static_power(struct devfreq *devfreq,
					     unsigned long voltage)
{
	struct rkvenc_dev *enc = devfreq->data;

	if (!enc->model_data)
		return 0;
	else
		return rockchip_ipa_get_static_power(enc->model_data,
						     voltage);
}

static struct devfreq_cooling_power venc_cooling_power_data = {
	.get_static_power = rkvenc_get_static_power,
};

static struct monitor_dev_profile enc_mdevp = {
	.type = MONITOR_TPYE_DEV,
	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
};

static int __maybe_unused rv1126_get_soc_info(struct device *dev,
					      struct device_node *np,
					      int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (of_property_match_string(np, "nvmem-cell-names", "performance") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "performance", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc performance value\n");
			return ret;
		}
		if (value == 0x1)
			*bin = 1;
		else
			*bin = 0;
	}
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static const struct rockchip_opp_data __maybe_unused rv1126_rkvenc_opp_data = {
	.get_soc_info = rv1126_get_soc_info,
};

static const struct of_device_id rockchip_rkvenc_of_match[] = {
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rv1109",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
	{
		.compatible = "rockchip,rv1126",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
#endif
	{},
};

static int rkvenc_devfreq_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct clk *clk_core = enc->core_clk_info.clk;
	struct devfreq_cooling_power *venc_dcp = &venc_cooling_power_data;
	struct rockchip_opp_info opp_info = {0};
	int ret = 0;

	if (!clk_core)
		return 0;

	enc->vdd = devm_regulator_get_optional(mpp->dev, "venc");
	if (IS_ERR_OR_NULL(enc->vdd)) {
		if (PTR_ERR(enc->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "venc regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_info(mpp->dev, "no regulator, devfreq is disabled\n");

		return 0;
	}

	rockchip_get_opp_data(rockchip_rkvenc_of_match, &opp_info);
	ret = rockchip_init_opp_table(mpp->dev, &opp_info, "leakage", "venc");
	if (ret) {
		dev_err(mpp->dev, "failed to init_opp_table\n");
		return ret;
	}

	ret = devfreq_add_governor(&devfreq_venc_ondemand);
	if (ret) {
		dev_err(mpp->dev, "failed to add venc_ondemand governor\n");
		goto governor_err;
	}

	rkvenc_devfreq_profile.initial_freq = clk_get_rate(clk_core);

	enc->devfreq = devm_devfreq_add_device(mpp->dev,
					       &rkvenc_devfreq_profile,
					       "venc_ondemand", (void *)enc);
	if (IS_ERR(enc->devfreq)) {
		ret = PTR_ERR(enc->devfreq);
		enc->devfreq = NULL;
		goto devfreq_err;
	}
	enc->devfreq->last_status.total_time = 1;
	enc->devfreq->last_status.busy_time = 1;

	devfreq_register_opp_notifier(mpp->dev, enc->devfreq);

	of_property_read_u32(mpp->dev->of_node, "dynamic-power-coefficient",
			     (u32 *)&venc_dcp->dyn_power_coeff);
	enc->model_data = rockchip_ipa_power_model_init(mpp->dev,
							"venc_leakage");
	if (IS_ERR_OR_NULL(enc->model_data)) {
		enc->model_data = NULL;
		dev_err(mpp->dev, "failed to initialize power model\n");
	} else if (enc->model_data->dynamic_coefficient) {
		venc_dcp->dyn_power_coeff =
			enc->model_data->dynamic_coefficient;
	}
	if (!venc_dcp->dyn_power_coeff) {
		dev_err(mpp->dev, "failed to get dynamic-coefficient\n");
		goto out;
	}

	enc->devfreq_cooling =
		of_devfreq_cooling_register_power(mpp->dev->of_node,
						  enc->devfreq, venc_dcp);
	if (IS_ERR_OR_NULL(enc->devfreq_cooling))
		dev_err(mpp->dev, "failed to register cooling device\n");

	enc_mdevp.data = enc->devfreq;
	enc->mdev_info = rockchip_system_monitor_register(mpp->dev, &enc_mdevp);
	if (IS_ERR(enc->mdev_info)) {
		dev_dbg(mpp->dev, "without system monitor\n");
		enc->mdev_info = NULL;
	}

out:

	return 0;

devfreq_err:
	devfreq_remove_governor(&devfreq_venc_ondemand);
governor_err:
	dev_pm_opp_of_remove_table(mpp->dev);

	return ret;
}

static int rkvenc_devfreq_remove(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (enc->mdev_info)
		rockchip_system_monitor_unregister(enc->mdev_info);
	if (enc->devfreq) {
		devfreq_unregister_opp_notifier(mpp->dev, enc->devfreq);
		dev_pm_opp_of_remove_table(mpp->dev);
		devfreq_remove_governor(&devfreq_venc_ondemand);
	}

	return 0;
}
#endif

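/*
 * IOMMU pagefault handling, in outline: when the fault comes from a non-zero
 * bus id, the fault IRQ is masked and this worker maps a spare page
 * (enc->aux_page) at the faulting iova, so the encoder's stray access lands
 * somewhere harmless instead of wedging the hardware; the mapping is torn
 * down again in rkvenc_isr() or rkvenc_exit().
 */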
static void rkvenc_iommu_handle_work(struct work_struct *work_s)
{
	int ret = 0;
	struct rkvenc_dev *enc = container_of(work_s, struct rkvenc_dev, iommu_work);
	struct mpp_dev *mpp = &enc->mpp;
	unsigned long page_iova = 0;

	mpp_debug_enter();

	/* avoid another page fault occurring while this one is handled */
	mpp_iommu_down_write(mpp->iommu_info);

	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	page_iova = round_down(enc->fault_iova, SZ_4K);
	ret = iommu_map(mpp->iommu_info->domain, page_iova,
			page_to_phys(enc->aux_page), IOMMU_PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		mpp_err("iommu_map iova %lx error.\n", page_iova);
	else
		enc->aux_iova = page_iova;

	rk_iommu_unmask_irq(mpp->dev);
	mpp_iommu_up_write(mpp->iommu_info);

	mpp_debug_leave();
}

static int rkvenc_iommu_fault_handle(struct iommu_domain *iommu,
				     struct device *iommu_dev,
				     unsigned long iova, int status, void *arg)
{
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();
	mpp_debug(DEBUG_IOMMU, "IOMMU_GET_BUS_ID(status)=%d\n", IOMMU_GET_BUS_ID(status));
	if (IOMMU_GET_BUS_ID(status)) {
		enc->fault_iova = iova;
		rk_iommu_mask_irq(mpp->dev);
		queue_work(enc->iommu_wq, &enc->iommu_work);
	}
	mpp_debug_leave();

	return 0;
}

static int rkvenc_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	int ret = 0;

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load",
			     &enc->default_max_load);
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);

	/* Get reset control from dtsi */
	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!enc->rst_a)
		mpp_err("No aclk reset resource defined\n");
	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!enc->rst_h)
		mpp_err("No hclk reset resource defined\n");
	enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!enc->rst_core)
		mpp_err("No core reset resource defined\n");

#ifdef CONFIG_PM_DEVFREQ
	ret = rkvenc_devfreq_init(mpp);
	if (ret)
		mpp_err("failed to add venc devfreq\n");
#endif

	/* for mmu pagefault */
	enc->aux_page = alloc_page(GFP_KERNEL);
	if (!enc->aux_page) {
		dev_err(mpp->dev, "failed to allocate auxiliary page\n");
		return -ENOMEM;
	}
	enc->aux_iova = -1;

	enc->iommu_wq = create_singlethread_workqueue("iommu_wq");
	if (!enc->iommu_wq) {
		mpp_err("failed to create workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&enc->iommu_work, rkvenc_iommu_handle_work);

	mpp->iommu_info->hdl = rkvenc_iommu_fault_handle;

	return 0;
}

static int rkvenc_exit(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	rkvenc_devfreq_remove(mpp);
#endif

	if (enc->aux_page)
		__free_page(enc->aux_page);

	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	if (enc->iommu_wq) {
		destroy_workqueue(enc->iommu_wq);
		enc->iommu_wq = NULL;
	}

	return 0;
}

static int rkvenc_reset(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_lock(&enc->devfreq->lock);
#endif
	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&enc->core_clk_info, CLK_MODE_REDUCE);
	/* safe reset */
	mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x1FF);
	mpp_write(mpp, RKVENC_CLR_BASE, RKVENC_SAFE_CLR_BIT);
	udelay(5);
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", mpp_read(mpp, RKVENC_INT_STATUS_BASE));
	mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
	mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);
	/* cru reset */
	if (enc->rst_a && enc->rst_h && enc->rst_core) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		mpp_safe_reset(enc->rst_core);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_safe_unreset(enc->rst_core);
		mpp_pmu_idle_request(mpp, false);
	}
#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_unlock(&enc->devfreq->lock);
#endif

	mpp_debug_leave();

	return 0;
}

static int rkvenc_clk_on(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_clk_safe_enable(enc->aclk_info.clk);
	mpp_clk_safe_enable(enc->hclk_info.clk);
	mpp_clk_safe_enable(enc->core_clk_info.clk);

	return 0;
}

static int rkvenc_clk_off(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	clk_disable_unprepare(enc->aclk_info.clk);
	clk_disable_unprepare(enc->hclk_info.clk);
	clk_disable_unprepare(enc->core_clk_info.clk);

	return 0;
}

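/*
 * Load estimation, in outline: rkvenc_get_freq() sums task->pixels over the
 * new task plus everything still in the pending queue; once that total
 * exceeds the "rockchip,default-max-load" value from the devicetree, the
 * task is bumped to CLK_MODE_ADVANCED and rkvenc_set_freq() picks the
 * matching (higher) clock rates.
 */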
static int rkvenc_get_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	/* if no max load is set, assume there is no advanced mode */
	if (!enc->default_max_load)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct rkvenc_task *loop_task = to_rkvenc_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > enc->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

static int rkvenc_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq) {
		unsigned long core_rate_hz;

		mutex_lock(&enc->devfreq->lock);
		core_rate_hz = mpp_get_clk_info_rate_hz(&enc->core_clk_info, task->clk_mode);
		if (enc->core_rate_hz != core_rate_hz) {
			enc->core_rate_hz = core_rate_hz;
			update_devfreq(enc->devfreq);
		} else {
			/*
			 * Restore the frequency when it has been changed by
			 * rkvenc_reduce_freq()
			 */
			clk_set_rate(enc->core_clk_info.clk, enc->core_last_rate_hz);
		}
		mutex_unlock(&enc->devfreq->lock);
		return 0;
	}
#endif
	mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);

	return 0;
}

static struct mpp_hw_ops rkvenc_hw_ops = {
	.init = rkvenc_init,
	.exit = rkvenc_exit,
	.clk_on = rkvenc_clk_on,
	.clk_off = rkvenc_clk_off,
	.get_freq = rkvenc_get_freq,
	.set_freq = rkvenc_set_freq,
	.reset = rkvenc_reset,
};

static struct mpp_dev_ops rkvenc_dev_ops = {
	.alloc_task = rkvenc_alloc_task,
	.run = rkvenc_run,
	.irq = rkvenc_irq,
	.isr = rkvenc_isr,
	.finish = rkvenc_finish,
	.result = rkvenc_result,
	.free_task = rkvenc_free_task,
	.ioctl = rkvenc_control,
	.init_session = rkvenc_init_session,
	.free_session = rkvenc_free_session,
	.dump_session = rkvenc_dump_session,
};

static const struct mpp_dev_var rkvenc_v1_data = {
	.device_type = MPP_DEVICE_RKVENC,
	.hw_info = &rkvenc_hw_info,
	.trans_info = trans_rk_rkvenc,
	.hw_ops = &rkvenc_hw_ops,
	.dev_ops = &rkvenc_dev_ops,
};

static const struct of_device_id mpp_rkvenc_dt_match[] = {
	{
		.compatible = "rockchip,rkv-encoder-v1",
		.data = &rkvenc_v1_data,
	},
	{},
};

static int rkvenc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct rkvenc_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	dev_info(dev, "probing start\n");

	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;
	mpp = &enc->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		goto failed_get_irq;
	}

	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
	rkvenc_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finish\n");

	return 0;

failed_get_irq:
	mpp_dev_remove(mpp);

	return ret;
}

static int rkvenc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	rkvenc_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_rkvenc_driver = {
	.probe = rkvenc_probe,
	.remove = rkvenc_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = RKVENC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
	},
};