1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
4 *
5 * author:
6 * Ding Wei, leo.ding@rock-chips.com
7 * Alpha Lin, alpha.lin@rock-chips.com
8 *
9 */
10 #include <asm/cacheflush.h>
11 #include <linux/delay.h>
12 #include <linux/iopoll.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/of_platform.h>
17 #include <linux/slab.h>
18 #include <linux/dma-buf.h>
19 #include <linux/uaccess.h>
20 #include <linux/regmap.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/proc_fs.h>
23 #include <soc/rockchip/pm_domains.h>
24
25 #include "rockchip_iep2_regs.h"
26 #include "mpp_debug.h"
27 #include "mpp_common.h"
28 #include "mpp_iommu.h"
29
30 #define IEP2_DRIVER_NAME "mpp-iep2"
31
#define IEP2_SESSION_MAX_BUFFERS 20
34 #define IEP2_SESSION_PROCF 0644
35 #define TILE_WIDTH 16
36 #define TILE_HEIGHT 4
37 #define MVL 28
38 #define MVR 27
39
/* Pixel formats understood by the IEP2 block (hardware encoding, 2 = 422) */
enum rockchip_iep2_fmt { ROCKCHIP_IEP2_FMT_YUV422 = 2, ROCKCHIP_IEP2_FMT_YUV420 };

/* Chroma ordering: semi-planar UV/VU or fully planar (P0/P variants) */
enum rockchip_iep2_yuv_swap {
	ROCKCHIP_IEP2_YUV_SWAP_SP_UV,
	ROCKCHIP_IEP2_YUV_SWAP_SP_VU,
	ROCKCHIP_IEP2_YUV_SWAP_P0,
	ROCKCHIP_IEP2_YUV_SWAP_P
};

/* Field order for deinterlacing: top-first or bottom-first */
enum rockchip_iep2_dil_ff_order { ROCKCHIP_IEP2_DIL_FF_ORDER_TB, ROCKCHIP_IEP2_DIL_FF_ORDER_BT };

/*
 * Deinterlace operating modes, e.g. I5O2 = 5 fields in / 2 frames out.
 * PD is pulldown handling, DECT is detection-only.
 */
enum rockchip_iep2_dil_mode {
	ROCKCHIP_IEP2_DIL_MODE_DISABLE,
	ROCKCHIP_IEP2_DIL_MODE_I5O2,
	ROCKCHIP_IEP2_DIL_MODE_I5O1T,
	ROCKCHIP_IEP2_DIL_MODE_I5O1B,
	ROCKCHIP_IEP2_DIL_MODE_I2O2,
	ROCKCHIP_IEP2_DIL_MODE_I1O1T,
	ROCKCHIP_IEP2_DIL_MODE_I1O1B,
	ROCKCHIP_IEP2_DIL_MODE_PD,
	ROCKCHIP_IEP2_DIL_MODE_BYPASS,
	ROCKCHIP_IEP2_DIL_MODE_DECT
};

/*
 * Pulldown field-compose selection: which source fields feed the
 * top/bottom inputs (C = current frame, N = next frame).
 */
enum ROCKCHIP_IEP2_PD_COMP_FLAG {
	ROCKCHIP_IEP2_PD_COMP_FLAG_CC,
	ROCKCHIP_IEP2_PD_COMP_FLAG_CN,
	ROCKCHIP_IEP2_PD_COMP_FLAG_NC,
	ROCKCHIP_IEP2_PD_COMP_FLAG_NON
};

/* default iep2 mtn table, used when the task does not enable its own */
static u32 iep2_mtn_tab[] = {0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01010000, 0x06050302,
			     0x0f0d0a08, 0x1c191512, 0x2b282420, 0x3634312e, 0x3d3c3a38, 0x40403f3e,
			     0x40404040, 0x40404040, 0x40404040, 0x40404040};

/* container_of helpers: embedded mpp_task/mpp_dev back to the IEP2 wrappers */
#define to_iep_task(task) container_of(task, struct iep_task, mpp_task)
#define to_iep2_dev(dev) container_of(dev, struct iep2_dev, mpp)
78
/* One image plane-address triple (luma, interleaved chroma, separate Cr) */
struct iep2_addr {
	u32 y;
	u32 cbcr;
	u32 cr;
};

/*
 * Per-task hardware parameter set, copied verbatim from userspace
 * (MPP_CMD_SET_REG_WRITE). Address fields are fd+offset packed values
 * until iep2_process_reg_fd() rewrites them to IOVAs.
 */
struct iep2_params {
	/* source/destination format and chroma ordering */
	u32 src_fmt;
	u32 src_yuv_swap;
	u32 dst_fmt;
	u32 dst_yuv_swap;
	/* geometry in 16x4 tiles plus line strides */
	u32 tile_cols;
	u32 tile_rows;
	u32 src_y_stride;
	u32 src_uv_stride;
	u32 dst_y_stride;

	/* current, previous, next. */
	struct iep2_addr src[3];
	struct iep2_addr dst[2];
	u32 mv_addr;
	u32 md_addr;

	/* deinterlace mode selection (enum rockchip_iep2_dil_*) */
	u32 dil_mode;
	u32 dil_out_mode;
	u32 dil_field_order;

	/* motion-detection tuning */
	u32 md_theta;
	u32 md_r;
	u32 md_lambda;

	/* film/OSD detection thresholds */
	u32 dect_resi_thr;
	u32 osd_area_num;
	u32 osd_gradh_thr;
	u32 osd_gradv_thr;

	u32 osd_pos_limit_en;
	u32 osd_pos_limit_num;

	u32 osd_limit_area[2];

	u32 osd_line_num;
	u32 osd_pec_thr;

	/* up to 8 OSD rectangles (start/end coordinates) */
	u32 osd_x_sta[8];
	u32 osd_x_end[8];
	u32 osd_y_sta[8];
	u32 osd_y_end[8];

	/* motion-estimation tuning */
	u32 me_pena;
	u32 mv_bonus;
	u32 mv_similar_thr;
	u32 mv_similar_num_thr0;
	s32 me_thr_offset;

	u32 mv_left_limit;
	u32 mv_right_limit;

	/* trusted motion-vector list with per-entry valid flags */
	s8 mv_tru_list[8];
	u32 mv_tru_vld[8];

	u32 eedi_thr0;

	u32 ble_backtoma_num;

	/* comb artifact detection */
	u32 comb_cnt_thr;
	u32 comb_feature_thr;
	u32 comb_t_thr;
	u32 comb_osd_vld[8];

	/* motion table: custom table used only when mtn_en is set */
	u32 mtn_en;
	u32 mtn_tab[16];

	/* pulldown compose flag (enum ROCKCHIP_IEP2_PD_COMP_FLAG) */
	u32 pd_mode;

	/* region-of-interest layers */
	u32 roi_en;
	u32 roi_layer_num;
	u32 roi_mode[8];
	u32 xsta[8];
	u32 xend[8];
	u32 ysta[8];
	u32 yend[8];
};
162
/*
 * Hardware readback for one finished task, returned to userspace via
 * MPP_CMD_SET_REG_READ (see iep2_finish()/iep2_result()).
 */
struct iep2_output {
	/* motion-vector histogram, bins from -MVL to +MVR */
	u32 mv_hist[MVL + MVR + 1];
	u32 dect_pd_tcnt;
	u32 dect_pd_bcnt;
	u32 dect_ff_cur_tcnt;
	u32 dect_ff_cur_bcnt;
	u32 dect_ff_nxt_tcnt;
	u32 dect_ff_nxt_bcnt;
	u32 dect_ff_ble_tcnt;
	u32 dect_ff_ble_bcnt;
	u32 dect_ff_nz;
	u32 dect_ff_comb_f;
	/* number of OSD areas the hardware detected (see iep2_osd_done) */
	u32 dect_osd_cnt;
	u32 out_comb_cnt;
	u32 out_osd_comb_cnt;
	u32 ff_gradt_tcnt;
	u32 ff_gradt_bcnt;
	/* detected OSD rectangles, tile units converted back to pixels */
	u32 x_sta[8];
	u32 x_end[8];
	u32 y_sta[8];
	u32 y_end[8];
};

/* One queued IEP2 job wrapping the generic mpp_task */
struct iep_task {
	struct mpp_task mpp_task;
	struct mpp_hw_info *hw_info;

	enum MPP_CLOCK_MODE clk_mode;
	struct iep2_params params;
	struct iep2_output output;

	struct reg_offset_info off_inf;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

/* IEP2 device instance wrapping the generic mpp_dev */
struct iep2_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info sclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_s;

	/* DMA-coherent region-of-interest buffer, allocated in iep2_init() */
	struct mpp_dma_buffer roi;
};
218
/*
 * Register index for every address field of iep2_params, in the exact
 * memory order of the fields starting at src[0].y (iep2_process_reg_fd
 * walks them as a flat u32 array). -1 marks a field with no register
 * (dst planes have no separate Cr register).
 */
static int iep2_addr_rnum[] = {
	24, 27, 28, /* src cur */
	25, 29, 30, /* src nxt */
	26, 31, 32, /* src prv */
	44, 46, -1, /* dst top */
	45, 47, -1, /* dst bot */
	34, /* mv */
	33, /* md */
};
228
/*
 * Translate the dma-buf fds packed into the task's address fields into
 * device IOVAs.
 *
 * Each address field holds either a bare fd (MPP_FLAGS_REG_NO_OFFSET) or
 * fd in bits [9:0] with a byte offset in bits [31:10]. The fields are
 * walked as one flat u32 array starting at src[0].y; iep2_addr_rnum
 * gives the matching register index (or -1 to skip).
 *
 * Returns 0 on success or the PTR_ERR from mpp_task_attach_fd.
 * Note: @msgs is currently unused here.
 */
static int iep2_process_reg_fd(struct mpp_session *session, struct iep_task *task, struct mpp_task_msgs *msgs)
{
	int i;
	/* see the detail at above table iep2_addr_rnum */
	int addr_num = ARRAY_SIZE(task->params.src) * 0x3 + ARRAY_SIZE(task->params.dst) * 0x3 + 0x2;

	u32 *paddr = &task->params.src[0].y;

	for (i = 0; i < addr_num; ++i) {
		int usr_fd;
		u32 offset;
		struct mpp_mem_region *mem_region = NULL;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			usr_fd = paddr[i];
			offset = 0;
		} else {
			/* fd in bits [9:0], byte offset in bits [31:10] */
			usr_fd = paddr[i] & 0x3ff;
			offset = paddr[i] >> 0xa;
		}

		/* fd 0 means "not set"; -1 means no register for this field */
		if (usr_fd == 0 || iep2_addr_rnum[i] == -1) {
			continue;
		}

		mem_region = mpp_task_attach_fd(&task->mpp_task, usr_fd);
		if (IS_ERR(mem_region)) {
			mpp_debug(DEBUG_IOMMU, "reg[%3d]: %08x failed\n", iep2_addr_rnum[i], paddr[i]);
			return PTR_ERR(mem_region);
		}

		mem_region->reg_idx = iep2_addr_rnum[i];
		mpp_debug(DEBUG_IOMMU, "reg[%3d]: %3d => %pad + offset %10d\n", iep2_addr_rnum[i], usr_fd, &mem_region->iova,
			  offset);
		/* rewrite the field in place: fd+offset -> device address */
		paddr[i] = mem_region->iova + offset;
	}

	return 0;
}
268
iep2_extract_task_msg(struct iep_task * task,struct mpp_task_msgs * msgs)269 static int iep2_extract_task_msg(struct iep_task *task, struct mpp_task_msgs *msgs)
270 {
271 u32 i;
272 struct mpp_request *req;
273
274 for (i = 0; i < msgs->req_cnt; i++) {
275 req = &msgs->reqs[i];
276 if (!req->size) {
277 continue;
278 }
279
280 switch (req->cmd) {
281 case MPP_CMD_SET_REG_WRITE: {
282 if (copy_from_user(&task->params, req->data, req->size)) {
283 mpp_err("copy_from_user params failed\n");
284 return -EIO;
285 }
286 break;
287 }
288 case MPP_CMD_SET_REG_READ: {
289 memcpy(&task->r_reqs[task->r_req_cnt++], req, sizeof(*req));
290 break;
291 }
292 case MPP_CMD_SET_REG_ADDR_OFFSET: {
293 mpp_extract_reg_offset_info(&task->off_inf, req);
294 break;
295 }
296 default:
297 break;
298 }
299 }
300 mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n", task->w_req_cnt, task->r_req_cnt);
301
302 return 0;
303 }
304
/*
 * Allocate and populate one IEP2 task from the userspace message set.
 *
 * Extracts the write/read requests, then (unless the session opted out
 * with MPP_FLAGS_REG_FD_NO_TRANS) resolves the packed fds into IOVAs.
 * Returns the embedded mpp_task pointer, or NULL on any failure
 * (the partially-built task is finalized and freed on the error path).
 */
static void *iep2_alloc_task(struct mpp_session *session, struct mpp_task_msgs *msgs)
{
	int ret;
	struct iep_task *task = NULL;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task) {
		return NULL;
	}

	mpp_task_init(session, &task->mpp_task);
	/* extract reqs for current task */
	ret = iep2_extract_task_msg(task, msgs);
	if (ret) {
		goto fail;
	}
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = iep2_process_reg_fd(session, task, msgs);
		if (ret) {
			goto fail;
		}
	}
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return &task->mpp_task;

fail:
	mpp_task_finalize(session, &task->mpp_task);
	kfree(task);
	return NULL;
}
341
/*
 * Program the main IEP2 register set for one task: formats, geometry,
 * deinterlace mode, all source/destination plane addresses and the
 * detection/motion tuning registers. Uses relaxed writes throughout;
 * iep2_run() issues the final wmb() before kicking the hardware.
 */
static void iep2_config(struct mpp_dev *mpp, struct iep_task *task)
{
	struct iep2_dev *iep = to_iep2_dev(mpp);
	struct iep2_params *cfg = &task->params;
	u32 reg;
	u32 width, height;

	/* geometry is tile based: TILE_WIDTH x TILE_HEIGHT pixel tiles */
	width = cfg->tile_cols * TILE_WIDTH;
	height = cfg->tile_rows * TILE_HEIGHT;

	reg = IEP2_REG_SRC_FMT(cfg->src_fmt) | IEP2_REG_SRC_YUV_SWAP(cfg->src_yuv_swap) | IEP2_REG_DST_FMT(cfg->dst_fmt) |
	      IEP2_REG_DST_YUV_SWAP(cfg->dst_yuv_swap) | IEP2_REG_DEBUG_DATA_EN;
	mpp_write_relaxed(mpp, IEP2_REG_IEP_CONFIG0, reg);

	reg = IEP2_REG_SRC_PIC_WIDTH(width - 1) | IEP2_REG_SRC_PIC_HEIGHT(height - 1);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_IMG_SIZE, reg);

	reg = IEP2_REG_SRC_VIR_Y_STRIDE(cfg->src_y_stride) | IEP2_REG_SRC_VIR_UV_STRIDE(cfg->src_uv_stride);
	mpp_write_relaxed(mpp, IEP2_REG_VIR_SRC_IMG_WIDTH, reg);

	reg = IEP2_REG_DST_VIR_STRIDE(cfg->dst_y_stride);
	mpp_write_relaxed(mpp, IEP2_REG_VIR_DST_IMG_WIDTH, reg);

	/* all processing stages are enabled unconditionally; only mode,
	 * output mode, field order and ROI come from the task */
	reg = IEP2_REG_DIL_MV_HIST_EN | IEP2_REG_DIL_COMB_EN | IEP2_REG_DIL_BLE_EN | IEP2_REG_DIL_EEDI_EN |
	      IEP2_REG_DIL_MEMC_EN | IEP2_REG_DIL_OSD_EN | IEP2_REG_DIL_PD_EN | IEP2_REG_DIL_FF_EN |
	      IEP2_REG_DIL_MD_PRE_EN | IEP2_REG_DIL_FIELD_ORDER(cfg->dil_field_order) |
	      IEP2_REG_DIL_OUT_MODE(cfg->dil_out_mode) | IEP2_REG_DIL_MODE(cfg->dil_mode);
	if (cfg->roi_en) {
		reg |= IEP2_REG_DIL_ROI_EN;
	}
	mpp_write_relaxed(mpp, IEP2_REG_DIL_CONFIG0, reg);

	if (cfg->dil_mode != ROCKCHIP_IEP2_DIL_MODE_PD) {
		/* normal modes: cur goes to CUR, nxt to NXT as-is */
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURY, cfg->src[0].y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURUV, cfg->src[0].cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURV, cfg->src[0].cr);

		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTY, cfg->src[1].y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTUV, cfg->src[1].cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTV, cfg->src[1].cr);
	} else {
		/* pulldown: pd_mode selects which frames feed top/bottom */
		struct iep2_addr *top, *bot;

		switch (cfg->pd_mode) {
		default:
		case ROCKCHIP_IEP2_PD_COMP_FLAG_CC:
			top = &cfg->src[0];
			bot = &cfg->src[0];
			break;
		case ROCKCHIP_IEP2_PD_COMP_FLAG_CN:
			top = &cfg->src[0];
			bot = &cfg->src[1];
			break;
		case ROCKCHIP_IEP2_PD_COMP_FLAG_NC:
			top = &cfg->src[1];
			bot = &cfg->src[0];
			break;
		}

		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURY, top->y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURUV, top->cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURV, top->cr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTY, bot->y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTUV, bot->cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTV, bot->cr);
	}

	/* src[2] is the previous frame */
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREY, cfg->src[0x2].y);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREUV, cfg->src[0x2].cbcr);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREV, cfg->src[0x2].cr);

	/* md/mv buffers are read and written by the hardware in place */
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_MD, cfg->md_addr);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_MV, cfg->mv_addr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_MD, cfg->md_addr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_MV, cfg->mv_addr);
	mpp_write_relaxed(mpp, IEP2_REG_ROI_ADDR, (u32)iep->roi.iova);

	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_TOPY, cfg->dst[0].y);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_TOPC, cfg->dst[0].cbcr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_BOTY, cfg->dst[1].y);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_BOTC, cfg->dst[1].cbcr);

	reg = IEP2_REG_MD_THETA(cfg->md_theta) | IEP2_REG_MD_R(cfg->md_r) | IEP2_REG_MD_LAMBDA(cfg->md_lambda);
	mpp_write_relaxed(mpp, IEP2_REG_MD_CONFIG0, reg);

	reg = IEP2_REG_DECT_RESI_THR(cfg->dect_resi_thr) | IEP2_REG_OSD_AREA_NUM(cfg->osd_area_num) |
	      IEP2_REG_OSD_GRADH_THR(cfg->osd_gradh_thr) | IEP2_REG_OSD_GRADV_THR(cfg->osd_gradv_thr);
	mpp_write_relaxed(mpp, IEP2_REG_DECT_CONFIG0, reg);

	reg = IEP2_REG_OSD_POS_LIMIT_NUM(cfg->osd_pos_limit_num);
	if (cfg->osd_pos_limit_en) {
		reg |= IEP2_REG_OSD_POS_LIMIT_EN;
	}
	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_CONFIG, reg);

	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_AREA(0), cfg->osd_limit_area[0]);
	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_AREA(1), cfg->osd_limit_area[1]);

	reg = IEP2_REG_OSD_PEC_THR(cfg->osd_pec_thr) | IEP2_REG_OSD_LINE_NUM(cfg->osd_line_num);
	mpp_write_relaxed(mpp, IEP2_REG_OSD_CONFIG0, reg);

	reg = IEP2_REG_ME_PENA(cfg->me_pena) | IEP2_REG_MV_BONUS(cfg->mv_bonus) |
	      IEP2_REG_MV_SIMILAR_THR(cfg->mv_similar_thr) | IEP2_REG_MV_SIMILAR_NUM_THR0(cfg->mv_similar_num_thr0) |
	      IEP2_REG_ME_THR_OFFSET(cfg->me_thr_offset);
	mpp_write_relaxed(mpp, IEP2_REG_ME_CONFIG0, reg);

	/* left limit is programmed as a two's-complement negative value */
	reg = IEP2_REG_MV_LEFT_LIMIT((~cfg->mv_left_limit) + 1) | IEP2_REG_MV_RIGHT_LIMIT(cfg->mv_right_limit);
	mpp_write_relaxed(mpp, IEP2_REG_ME_LIMIT_CONFIG, reg);

	mpp_write_relaxed(mpp, IEP2_REG_EEDI_CONFIG0, IEP2_REG_EEDI_THR0(cfg->eedi_thr0));
	mpp_write_relaxed(mpp, IEP2_REG_BLE_CONFIG0, IEP2_REG_BLE_BACKTOMA_NUM(cfg->ble_backtoma_num));
}
454
iep2_osd_cfg(struct mpp_dev * mpp,struct iep_task * task)455 static void iep2_osd_cfg(struct mpp_dev *mpp, struct iep_task *task)
456 {
457 struct iep2_params *hw_cfg = &task->params;
458 int i;
459 u32 reg;
460
461 for (i = 0; i < hw_cfg->osd_area_num; ++i) {
462 reg = IEP2_REG_OSD_X_STA(hw_cfg->osd_x_sta[i]) | IEP2_REG_OSD_X_END(hw_cfg->osd_x_end[i]) |
463 IEP2_REG_OSD_Y_STA(hw_cfg->osd_y_sta[i]) | IEP2_REG_OSD_Y_END(hw_cfg->osd_y_end[i]);
464 mpp_write_relaxed(mpp, IEP2_REG_OSD_AREA_CONF(i), reg);
465 }
466
467 for (; i < ARRAY_SIZE(hw_cfg->osd_x_sta); ++i) {
468 mpp_write_relaxed(mpp, IEP2_REG_OSD_AREA_CONF(i), 0);
469 }
470 }
471
iep2_mtn_tab_cfg(struct mpp_dev * mpp,struct iep_task * task)472 static void iep2_mtn_tab_cfg(struct mpp_dev *mpp, struct iep_task *task)
473 {
474 struct iep2_params *hw_cfg = &task->params;
475 int i;
476 u32 *mtn_tab = hw_cfg->mtn_en ? hw_cfg->mtn_tab : iep2_mtn_tab;
477
478 for (i = 0; i < ARRAY_SIZE(hw_cfg->mtn_tab); ++i) {
479 mpp_write_relaxed(mpp, IEP2_REG_DIL_MTN_TAB(i), mtn_tab[i]);
480 }
481 }
482
/*
 * Valid-bit per list slot; entries repeat every 4 because each register
 * holds 4 list entries sharing the same bit layout.
 */
static u32 iep2_tru_list_vld_tab[] = {
	IEP2_REG_MV_TRU_LIST0_4_VLD, IEP2_REG_MV_TRU_LIST1_5_VLD, IEP2_REG_MV_TRU_LIST2_6_VLD, IEP2_REG_MV_TRU_LIST3_7_VLD,
	IEP2_REG_MV_TRU_LIST0_4_VLD, IEP2_REG_MV_TRU_LIST1_5_VLD, IEP2_REG_MV_TRU_LIST2_6_VLD, IEP2_REG_MV_TRU_LIST3_7_VLD};

/*
 * Pack the 8-entry trusted motion-vector list into the hardware's two
 * registers (4 entries each). Only entries flagged in mv_tru_vld are
 * written; their valid bit comes from iep2_tru_list_vld_tab.
 */
static void iep2_tru_list_cfg(struct mpp_dev *mpp, struct iep_task *task)
{
	struct iep2_params *cfg = &task->params;
	int i;
	u32 reg;

	for (i = 0; i < ARRAY_SIZE(cfg->mv_tru_list); i += 0x4) {
		reg = 0;

		if (cfg->mv_tru_vld[i]) {
			reg |= IEP2_REG_MV_TRU_LIST0_4(cfg->mv_tru_list[i]) | iep2_tru_list_vld_tab[i];
		}

		if (cfg->mv_tru_vld[i + 1]) {
			reg |= IEP2_REG_MV_TRU_LIST1_5(cfg->mv_tru_list[i + 1]) | iep2_tru_list_vld_tab[i + 1];
		}

		if (cfg->mv_tru_vld[i + 0x2]) {
			reg |= IEP2_REG_MV_TRU_LIST2_6(cfg->mv_tru_list[i + 0x2]) | iep2_tru_list_vld_tab[i + 0x2];
		}

		if (cfg->mv_tru_vld[i + 0x3]) {
			reg |= IEP2_REG_MV_TRU_LIST3_7(cfg->mv_tru_list[i + 0x3]) | iep2_tru_list_vld_tab[i + 0x3];
		}

		mpp_write_relaxed(mpp, IEP2_REG_MV_TRU_LIST(i / 0x4), reg);
	}
}
515
iep2_comb_cfg(struct mpp_dev * mpp,struct iep_task * task)516 static void iep2_comb_cfg(struct mpp_dev *mpp, struct iep_task *task)
517 {
518 struct iep2_params *hw_cfg = &task->params;
519 int i;
520 u32 reg = 0;
521
522 for (i = 0; i < ARRAY_SIZE(hw_cfg->comb_osd_vld); ++i) {
523 if (hw_cfg->comb_osd_vld[i]) {
524 reg |= IEP2_REG_COMB_OSD_VLD(i);
525 }
526 }
527
528 reg |= IEP2_REG_COMB_T_THR(hw_cfg->comb_t_thr) | IEP2_REG_COMB_FEATRUE_THR(hw_cfg->comb_feature_thr) |
529 IEP2_REG_COMB_CNT_THR(hw_cfg->comb_cnt_thr);
530 mpp_write_relaxed(mpp, IEP2_REG_COMB_CONFIG0, reg);
531 }
532
/*
 * Start one task on the hardware: program every register group, enable
 * the interrupts we care about, flush posted writes, then kick the
 * frame-start register. Always returns 0.
 */
static int iep2_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct iep_task *task = NULL;

	mpp_debug_enter();

	task = to_iep_task(mpp_task);

	/* init current task */
	mpp->cur_task = mpp_task;

	iep2_config(mpp, task);
	iep2_osd_cfg(mpp, task);
	iep2_mtn_tab_cfg(mpp, task);
	iep2_tru_list_cfg(mpp, task);
	iep2_comb_cfg(mpp, task);

	/* set interrupt enable bits */
	mpp_write_relaxed(mpp, IEP2_REG_INT_EN, IEP2_REG_FRM_DONE_EN | IEP2_REG_OSD_MAX_EN | IEP2_REG_BUS_ERROR_EN);

	/* Last, flush the registers */
	wmb();
	/* start iep2 */
	mpp_write(mpp, IEP2_REG_FRM_START, 1);

	mpp_debug_leave();

	return 0;
}
562
iep2_irq(struct mpp_dev * mpp)563 static int iep2_irq(struct mpp_dev *mpp)
564 {
565 mpp->irq_status = mpp_read(mpp, IEP2_REG_INT_STS);
566 mpp_write(mpp, IEP2_REG_INT_CLR, 0xffffffff);
567
568 if (!IEP2_REG_RO_VALID_INT_STS(mpp->irq_status)) {
569 return IRQ_NONE;
570 }
571
572 return IRQ_WAKE_THREAD;
573 }
574
iep2_isr(struct mpp_dev * mpp)575 static int iep2_isr(struct mpp_dev *mpp)
576 {
577 struct mpp_task *mpp_task = NULL;
578 struct iep_task *task = NULL;
579 struct iep2_dev *iep = to_iep2_dev(mpp);
580
581 mpp_task = mpp->cur_task;
582 task = to_iep_task(mpp_task);
583 if (!task) {
584 dev_err(iep->mpp.dev, "no current task\n");
585 return IRQ_HANDLED;
586 }
587
588 mpp_time_diff(mpp_task);
589 mpp->cur_task = NULL;
590 task->irq_status = mpp->irq_status;
591 mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
592
593 if (IEP2_REG_RO_BUS_ERROR_STS(task->irq_status)) {
594 atomic_inc(&mpp->reset_request);
595 }
596
597 mpp_task_finish(mpp_task->session, mpp_task);
598
599 mpp_debug_leave();
600
601 return IRQ_HANDLED;
602 }
603
iep2_osd_done(struct mpp_dev * mpp,struct iep_task * task)604 static void iep2_osd_done(struct mpp_dev *mpp, struct iep_task *task)
605 {
606 int i;
607 u32 reg;
608
609 for (i = 0; i < task->output.dect_osd_cnt; ++i) {
610 reg = mpp_read(mpp, IEP2_REG_RO_OSD_AREA_X(i));
611 task->output.x_sta[i] = IEP2_REG_RO_X_STA(reg) / 0x10;
612 task->output.x_end[i] = IEP2_REG_RO_X_END(reg) / 0x10;
613
614 reg = mpp_read(mpp, IEP2_REG_RO_OSD_AREA_Y(i));
615 task->output.y_sta[i] = IEP2_REG_RO_Y_STA(reg) / 0x4;
616 task->output.y_end[i] = IEP2_REG_RO_Y_END(reg) / 0x4;
617 }
618
619 for (; i < ARRAY_SIZE(task->output.x_sta); ++i) {
620 task->output.x_sta[i] = 0;
621 task->output.x_end[i] = 0;
622 task->output.y_sta[i] = 0;
623 task->output.y_end[i] = 0;
624 }
625 }
626
/*
 * Collect every read-only result register of the finished task into
 * task->output: pulldown/film counters, comb statistics, detected OSD
 * areas and the packed motion-vector histogram (two bins per register).
 * Always returns 0.
 */
static int iep2_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct iep_task *task = to_iep_task(mpp_task);
	struct iep2_output *output = &task->output;
	u32 i;
	u32 reg;

	mpp_debug_enter();

	output->dect_pd_tcnt = mpp_read(mpp, IEP2_REG_RO_PD_TCNT);
	output->dect_pd_bcnt = mpp_read(mpp, IEP2_REG_RO_PD_BCNT);
	output->dect_ff_cur_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_CUR_TCNT);
	output->dect_ff_cur_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_CUR_BCNT);
	output->dect_ff_nxt_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_NXT_TCNT);
	output->dect_ff_nxt_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_NXT_BCNT);
	output->dect_ff_ble_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_BLE_TCNT);
	output->dect_ff_ble_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_BLE_BCNT);
	output->dect_ff_nz = mpp_read(mpp, IEP2_REG_RO_FF_COMB_NZ);
	output->dect_ff_comb_f = mpp_read(mpp, IEP2_REG_RO_FF_COMB_F);
	output->dect_osd_cnt = mpp_read(mpp, IEP2_REG_RO_OSD_NUM);

	/* two comb counters share one register */
	reg = mpp_read(mpp, IEP2_REG_RO_COMB_CNT);
	output->out_comb_cnt = IEP2_REG_RO_OUT_COMB_CNT(reg);
	output->out_osd_comb_cnt = IEP2_REG_RO_OUT_OSD_COMB_CNT(reg);
	output->ff_gradt_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_GRADT_TCNT);
	output->ff_gradt_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_GRADT_BCNT);

	iep2_osd_done(mpp, task);

	/* histogram: even/odd bins packed two per register */
	for (i = 0; i < ARRAY_SIZE(output->mv_hist); i += 0x2) {
		reg = mpp_read(mpp, IEP2_REG_RO_MV_HIST_BIN(i / 0x2));
		output->mv_hist[i] = IEP2_REG_RO_MV_HIST_EVEN(reg);
		output->mv_hist[i + 1] = IEP2_REG_RO_MV_HIST_ODD(reg);
	}

	mpp_debug_leave();

	return 0;
}
666
iep2_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)667 static int iep2_result(struct mpp_dev *mpp, struct mpp_task *mpp_task, struct mpp_task_msgs *msgs)
668 {
669 u32 i;
670 struct mpp_request *req;
671 struct iep_task *task = to_iep_task(mpp_task);
672
673 /* may overflow the kernel */
674 for (i = 0; i < task->r_req_cnt; i++) {
675 req = &task->r_reqs[i];
676
677 if (copy_to_user(req->data, (u8 *)&task->output, req->size)) {
678 mpp_err("copy_to_user reg fail\n");
679 return -EIO;
680 }
681 }
682
683 return 0;
684 }
685
/* Release one task: detach its resources from the session, then free it. */
static int iep2_free_task(struct mpp_session *session, struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_iep_task(mpp_task));

	return 0;
}
695
696 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
iep2_procfs_remove(struct mpp_dev * mpp)697 static int iep2_procfs_remove(struct mpp_dev *mpp)
698 {
699 struct iep2_dev *iep = to_iep2_dev(mpp);
700
701 if (iep->procfs) {
702 proc_remove(iep->procfs);
703 iep->procfs = NULL;
704 }
705
706 return 0;
707 }
708
iep2_procfs_init(struct mpp_dev * mpp)709 static int iep2_procfs_init(struct mpp_dev *mpp)
710 {
711 struct iep2_dev *iep = to_iep2_dev(mpp);
712
713 iep->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
714 if (IS_ERR_OR_NULL(iep->procfs)) {
715 mpp_err("failed on mkdir\n");
716 iep->procfs = NULL;
717 return -EIO;
718 }
719 mpp_procfs_create_u32("aclk", IEP2_SESSION_PROCF, iep->procfs, &iep->aclk_info.debug_rate_hz);
720 mpp_procfs_create_u32("session_buffers", IEP2_SESSION_PROCF, iep->procfs, &mpp->session_max_buffers);
721
722 return 0;
723 }
724 #else
/* No-op stubs when CONFIG_ROCKCHIP_MPP_PROC_FS is disabled */
static inline int iep2_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int iep2_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
734 #endif
735
736 #define IEP2_TILE_W_MAX 120
737 #define IEP2_TILE_H_MAX 272
738
iep2_init(struct mpp_dev * mpp)739 static int iep2_init(struct mpp_dev *mpp)
740 {
741 int ret;
742 struct iep2_dev *iep = to_iep2_dev(mpp);
743
744 mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_IEP2];
745
746 /* Get clock info from dtsi */
747 ret = mpp_get_clk_info(mpp, &iep->aclk_info, "aclk");
748 if (ret) {
749 mpp_err("failed on clk_get aclk\n");
750 }
751 ret = mpp_get_clk_info(mpp, &iep->hclk_info, "hclk");
752 if (ret) {
753 mpp_err("failed on clk_get hclk\n");
754 }
755 ret = mpp_get_clk_info(mpp, &iep->sclk_info, "sclk");
756 if (ret) {
757 mpp_err("failed on clk_get sclk\n");
758 }
759 /* Set default rates */
760 mpp_set_clk_info_rate_hz(&iep->aclk_info, CLK_MODE_DEFAULT, 0x12c * MHZ);
761
762 iep->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "rst_a");
763 if (!iep->rst_a) {
764 mpp_err("No aclk reset resource define\n");
765 }
766 iep->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "rst_h");
767 if (!iep->rst_h) {
768 mpp_err("No hclk reset resource define\n");
769 }
770 iep->rst_s = mpp_reset_control_get(mpp, RST_TYPE_CORE, "rst_s");
771 if (!iep->rst_s) {
772 mpp_err("No sclk reset resource define\n");
773 }
774
775 iep->roi.size = IEP2_TILE_W_MAX * IEP2_TILE_H_MAX;
776 iep->roi.vaddr = dma_alloc_coherent(mpp->dev, iep->roi.size, &iep->roi.iova, GFP_KERNEL);
777 if (iep->roi.vaddr) {
778 dev_err(mpp->dev, "allocate roi buffer failed\n");
779 }
780
781 return 0;
782 }
783
iep2_clk_on(struct mpp_dev * mpp)784 static int iep2_clk_on(struct mpp_dev *mpp)
785 {
786 struct iep2_dev *iep = to_iep2_dev(mpp);
787
788 mpp_clk_safe_enable(iep->aclk_info.clk);
789 mpp_clk_safe_enable(iep->hclk_info.clk);
790 mpp_clk_safe_enable(iep->sclk_info.clk);
791
792 return 0;
793 }
794
iep2_clk_off(struct mpp_dev * mpp)795 static int iep2_clk_off(struct mpp_dev *mpp)
796 {
797 struct iep2_dev *iep = to_iep2_dev(mpp);
798
799 mpp_clk_safe_disable(iep->aclk_info.clk);
800 mpp_clk_safe_disable(iep->hclk_info.clk);
801 mpp_clk_safe_disable(iep->sclk_info.clk);
802
803 return 0;
804 }
805
iep2_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)806 static int iep2_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
807 {
808 struct iep2_dev *iep = to_iep2_dev(mpp);
809 struct iep_task *task = to_iep_task(mpp_task);
810
811 mpp_clk_set_rate(&iep->aclk_info, task->clk_mode);
812
813 return 0;
814 }
815
/*
 * Full hardware reset: assert all three resets under a PMU idle request
 * (required so the IOMMU recovers afterwards), hold briefly, release.
 * Skipped entirely unless all three reset lines were found at init.
 */
static int iep2_reset(struct mpp_dev *mpp)
{
	struct iep2_dev *iep = to_iep2_dev(mpp);

	if (iep->rst_a && iep->rst_h && iep->rst_s) {
		/* Don't skip this or iommu won't work after reset */
		rockchip_pmu_idle_request(mpp->dev, true);
		mpp_safe_reset(iep->rst_a);
		mpp_safe_reset(iep->rst_h);
		mpp_safe_reset(iep->rst_s);
		/* hold reset for 5us before release */
		udelay(0x5);
		mpp_safe_unreset(iep->rst_a);
		mpp_safe_unreset(iep->rst_h);
		mpp_safe_unreset(iep->rst_s);
		rockchip_pmu_idle_request(mpp->dev, false);
	}

	return 0;
}
835
/* Hardware lifecycle hooks registered with the MPP core */
static struct mpp_hw_ops iep_v2_hw_ops = {
	.init = iep2_init,
	.clk_on = iep2_clk_on,
	.clk_off = iep2_clk_off,
	.set_freq = iep2_set_freq,
	.reset = iep2_reset,
};

/* Per-task operations registered with the MPP core */
static struct mpp_dev_ops iep_v2_dev_ops = {
	.alloc_task = iep2_alloc_task,
	.run = iep2_run,
	.irq = iep2_irq,
	.isr = iep2_isr,
	.finish = iep2_finish,
	.result = iep2_result,
	.free_task = iep2_free_task,
};

/* No dedicated version/id register on this block */
static struct mpp_hw_info iep2_hw_info = {
	.reg_id = -1,
};

static const struct mpp_dev_var iep2_v2_data = {
	.device_type = MPP_DEVICE_IEP2,
	.hw_ops = &iep_v2_hw_ops,
	.dev_ops = &iep_v2_dev_ops,
	.hw_info = &iep2_hw_info,
};

/* DT match table; the rv1126 compatible shares the same v2 data */
static const struct of_device_id mpp_iep2_match[] = {
	{
		.compatible = "rockchip,iep-v2",
		.data = &iep2_v2_data,
	},
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rv1126-iep",
		.data = &iep2_v2_data,
	},
#endif
	{},
};
878
/*
 * Platform probe: allocate the device wrapper, resolve the DT match
 * data, run the common MPP probe, hook up the shared interrupt and
 * register with the MPP service.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int iep2_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct iep2_dev *iep = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	dev_info(dev, "probe device\n");
	iep = devm_kzalloc(dev, sizeof(struct iep2_dev), GFP_KERNEL);
	if (!iep) {
		return -ENOMEM;
	}

	mpp = &iep->mpp;
	platform_set_drvdata(pdev, iep);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_iep2_match, pdev->dev.of_node);
		if (match) {
			mpp->var = (struct mpp_dev_var *)match->data;
		}
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	/* hard half + threaded half; the line is shared with other blocks */
	ret = devm_request_threaded_irq(dev, mpp->irq, mpp_dev_irq, mpp_dev_isr_sched, IRQF_SHARED, dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupter runtime failed\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = IEP2_SESSION_MAX_BUFFERS;
	iep2_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finish\n");

	return 0;
}
923
iep2_remove(struct platform_device * pdev)924 static int iep2_remove(struct platform_device *pdev)
925 {
926 struct device *dev = &pdev->dev;
927 struct iep2_dev *iep = platform_get_drvdata(pdev);
928
929 dma_free_coherent(dev, iep->roi.size, iep->roi.vaddr, iep->roi.iova);
930
931 dev_info(dev, "remove device\n");
932 mpp_dev_remove(&iep->mpp);
933 iep2_procfs_remove(&iep->mpp);
934
935 return 0;
936 }
937
/*
 * Platform shutdown: flag the service so no new tasks are accepted,
 * then wait for the running task count to drain.
 */
static void iep2_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct iep2_dev *iep = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &iep->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	/* poll every 20ms (0x4e20 us), give up after 200ms (0x30d40 us) */
	ret = readx_poll_timeout(atomic_read, &mpp->task_count, val, val == 0, 0x4e20, 0x30d40);
	if (ret == -ETIMEDOUT) {
		dev_err(dev, "wait total running time out\n");
	}
}
954
/* Platform driver, exported for registration by the MPP service core */
struct platform_driver rockchip_iep2_driver = {
	.probe = iep2_probe,
	.remove = iep2_remove,
	.shutdown = iep2_shutdown,
	.driver =
	    {
		.name = IEP2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_iep2_match),
	    },
};
EXPORT_SYMBOL(rockchip_iep2_driver);
966