1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/clk.h>
5 #include <linux/delay.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/slab.h>
8 #include <media/v4l2-common.h>
9 #include <media/v4l2-event.h>
10 #include <media/v4l2-fh.h>
11 #include <media/v4l2-ioctl.h>
12 #include <media/v4l2-mc.h>
13 #include <media/v4l2-subdev.h>
14 #include <media/videobuf2-dma-contig.h>
15 #include <media/videobuf2-dma-sg.h>
16 #include <linux/rkisp1-config.h>
17 #include <uapi/linux/rk-video-format.h>
18
19 #include "dev.h"
20 #include "regs.h"
21
22
23 /*
24 * DDR->| |->MB------->DDR
25 * |->TNR->DDR->NR->SHARP->DDR->FEC->|->SCL0----->DDR
26 * ISP->| |->SCL1----->DDR
27 * |->SCL2----->DDR
28 */
29
30 static void rkispp_module_work_event(struct rkispp_device *dev,
31 void *buf_rd, void *buf_wr,
32 u32 module, bool is_isr);
33
/* Program the current luma (Y) plane base address register for @stream. */
static void set_y_addr(struct rkispp_stream *stream, u32 val)
{
	struct rkispp_device *ispp_dev = stream->isppdev;

	rkispp_write(ispp_dev, stream->config->reg.cur_y_base, val);
}
38
/* Program the current chroma (UV) plane base address register for @stream. */
static void set_uv_addr(struct rkispp_stream *stream, u32 val)
{
	struct rkispp_device *ispp_dev = stream->isppdev;

	rkispp_write(ispp_dev, stream->config->reg.cur_uv_base, val);
}
43
/*
 * hrtimer callback used for the "early frame done" feature: it polls the
 * NR (or FEC, when FEC is enabled) tile line counter and either re-arms
 * itself while the hardware is still below the configured wait line, or
 * signals frame end on all active streams before the real interrupt fires.
 */
static enum hrtimer_restart rkispp_frame_done_early(struct hrtimer *timer)
{
	struct rkispp_stream_vdev *vdev =
		container_of(timer, struct rkispp_stream_vdev, frame_qst);
	struct rkispp_stream *stream = &vdev->stream[0];
	struct rkispp_device *dev = stream->isppdev;
	void __iomem *base = dev->hw_dev->base_addr;
	bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	/* the hardware line counter advances in units of 128 lines */
	u32 threshold = vdev->wait_line / 128;
	u32 tile, tile_mask, working, work_mask;
	u32 i, seq, ycnt, shift, time, max_time;
	u64 t, ns = ktime_get_ns();

	working = readl(base + RKISPP_CTRL_SYS_STATUS);
	tile = readl(base + RKISPP_CTRL_SYS_CTL_STA0);
	if (is_fec_en) {
		/* FEC is the last module in the chain: watch its counter */
		shift = 16;
		work_mask = FEC_WORKING;
		tile_mask = FEC_TILE_LINE_CNT_MASK;
		t = vdev->fec.dbg.timestamp;
		seq = vdev->fec.dbg.id;
		max_time = 6000000;	/* cap re-arm delay at 6ms */
	} else {
		shift = 8;
		work_mask = NR_WORKING;
		tile_mask = NR_TILE_LINE_CNT_MASK;
		t = vdev->nr.dbg.timestamp;
		seq = vdev->nr.dbg.id;
		max_time = 2000000;	/* cap re-arm delay at 2ms */
	}
	working &= work_mask;
	tile &= tile_mask;
	ycnt = tile >> shift;		/* lines done so far, in 128-line units */
	time = (u32)(ns - t);		/* ns elapsed since the module started */
	if (dev->ispp_sdev.state == ISPP_STOP) {
		vdev->is_done_early = false;
		goto end;
	} else if (working && ycnt < threshold) {
		/*
		 * Not at the wait line yet: extrapolate the remaining time
		 * from the measured per-line rate (plus 100us margin) and
		 * re-arm the timer.
		 */
		if (!ycnt)
			ns = max_time;
		else
			ns = time * (threshold - ycnt) / ycnt + 100 * 1000;
		if (ns > max_time)
			ns = max_time;
		hrtimer_forward(timer, timer->base->get_time(), ns_to_ktime(ns));
		ret = HRTIMER_RESTART;
	} else {
		v4l2_dbg(3, rkispp_debug, &stream->isppdev->v4l2_dev,
			 "%s seq:%d line:%d ycnt:%d time:%dus\n",
			 __func__, seq, vdev->wait_line, ycnt * 128, time / 1000);
		/* wait line reached (or module idle): complete the frame early */
		for (i = 0; i < dev->stream_max; i++) {
			stream = &vdev->stream[i];
			if (!stream->streaming || !stream->is_cfg || stream->stopping)
				continue;
			rkispp_frame_end(stream, FRAME_WORK);
		}
	}
end:
	return ret;
}
105
update_mi(struct rkispp_stream * stream)106 static void update_mi(struct rkispp_stream *stream)
107 {
108 struct rkispp_device *dev = stream->isppdev;
109 struct rkispp_dummy_buffer *dummy_buf;
110 u32 val;
111
112 if (stream->curr_buf) {
113 val = stream->curr_buf->buff_addr[RKISPP_PLANE_Y];
114 set_y_addr(stream, val);
115 val = stream->curr_buf->buff_addr[RKISPP_PLANE_UV];
116 set_uv_addr(stream, val);
117 }
118
119 if (stream->type == STREAM_OUTPUT && !stream->curr_buf) {
120 dummy_buf = &dev->hw_dev->dummy_buf;
121 set_y_addr(stream, dummy_buf->dma_addr);
122 set_uv_addr(stream, dummy_buf->dma_addr);
123 }
124
125 v4l2_dbg(2, rkispp_debug, &stream->isppdev->v4l2_dev,
126 "%s stream:%d Y:0x%x UV:0x%x\n",
127 __func__, stream->id,
128 rkispp_read(dev, stream->config->reg.cur_y_base),
129 rkispp_read(dev, stream->config->reg.cur_uv_base));
130 }
131
is_en_done_early(struct rkispp_device * dev)132 static bool is_en_done_early(struct rkispp_device *dev)
133 {
134 u32 height = dev->ispp_sdev.out_fmt.height;
135 u32 line = dev->stream_vdev.wait_line;
136 bool en = false;
137
138 if (line) {
139 if (line > height - 128)
140 dev->stream_vdev.wait_line = height - 128;
141 en = true;
142 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
143 "wait %d line to wake up frame\n", line);
144 }
145
146 return en;
147 }
148
rkispp_tnr_complete(struct rkispp_device * dev,struct rkispp_tnr_inf * inf)149 static void rkispp_tnr_complete(struct rkispp_device *dev, struct rkispp_tnr_inf *inf)
150 {
151 struct rkispp_subdev *ispp_sdev = &dev->ispp_sdev;
152 struct v4l2_event ev = {
153 .type = RKISPP_V4L2_EVENT_TNR_COMPLETE,
154 };
155 struct rkispp_tnr_inf *tnr_inf;
156
157 tnr_inf = (struct rkispp_tnr_inf *)ev.u.data;
158 memcpy(tnr_inf, inf, sizeof(*tnr_inf));
159
160 v4l2_subdev_notify_event(&ispp_sdev->sd, &ev);
161 }
162
/*
 * Tear down all TNR buffer state: read-side buffers are handed back to
 * the ISP producer via s_rx_buffer, the locally allocated write/report
 * descriptors are kfree'd, and every dummy buffer in vdev->tnr.buf is
 * released.
 */
static void tnr_free_buf(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct list_head *list;
	int i;

	/* read buffers belong to the remote ISP: return them all */
	list = &vdev->tnr.list_rd;
	if (vdev->tnr.cur_rd) {
		list_add_tail(&vdev->tnr.cur_rd->list, list);
		/* nxt_rd may alias cur_rd; clear it to avoid a double add */
		if (vdev->tnr.nxt_rd == vdev->tnr.cur_rd)
			vdev->tnr.nxt_rd = NULL;
		vdev->tnr.cur_rd = NULL;
	}
	if (vdev->tnr.nxt_rd) {
		list_add_tail(&vdev->tnr.nxt_rd->list, list);
		vdev->tnr.nxt_rd = NULL;
	}
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		v4l2_subdev_call(dev->ispp_sdev.remote_sd,
				 video, s_rx_buffer, dbufs, NULL);
	}

	/* write/report descriptors were kzalloc'ed in tnr_init_buf() */
	list = &vdev->tnr.list_wr;
	if (vdev->tnr.cur_wr) {
		list_add_tail(&vdev->tnr.cur_wr->list, list);
		vdev->tnr.cur_wr = NULL;
	}
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		kfree(dbufs);
	}
	list = &vdev->tnr.list_rpt;
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		kfree(dbufs);
	}

	/* free every dummy buffer in tnr.buf; iir is its first member */
	for (i = 0; i < sizeof(vdev->tnr.buf) /
	     sizeof(struct rkispp_dummy_buffer); i++)
		rkispp_free_buffer(dev, &vdev->tnr.buf.iir + i);

	vdev->tnr.is_but_init = false;
	vdev->tnr.is_trigger = false;
}
209
/*
 * Allocate the TNR working buffers: @cnt write buffer groups (picture +
 * gain per group), the optional IIR buffer used in ISP quick mode, and
 * the gain_kg buffer. On any failure everything allocated so far is
 * released via tnr_free_buf().
 *
 * @pic_size:  byte size of one picture plane buffer
 * @gain_size: byte size of one gain buffer (page-aligned internally)
 */
static int tnr_init_buf(struct rkispp_device *dev,
			u32 pic_size, u32 gain_size)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct rkispp_dummy_buffer *buf;
	int i, j, ret, cnt = RKISPP_BUF_MAX;
	u32 buf_idx = 0;

	/* quick mode pipelines only need a single write group */
	if (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK)
		cnt = 1;
	for (i = 0; i < cnt; i++) {
		dbufs = kzalloc(sizeof(*dbufs), GFP_KERNEL);
		if (!dbufs) {
			ret = -ENOMEM;
			goto err;
		}
		dbufs->is_isp = false;
		for (j = 0; j < GROUP_BUF_MAX; j++) {
			buf = &vdev->tnr.buf.wr[i][j];
			buf->is_need_dbuf = true;
			buf->is_need_dmafd = false;
			buf->is_need_vaddr = true;
			/* j == 0 is the picture plane, j == 1 the gain plane */
			buf->size = !j ? pic_size : PAGE_ALIGN(gain_size);
			buf->index = buf_idx++;
			ret = rkispp_allow_buffer(dev, buf);
			if (ret) {
				/* dbufs not yet on list_wr: free it here */
				kfree(dbufs);
				goto err;
			}
			dbufs->dbuf[j] = buf->dbuf;
			dbufs->didx[j] = buf->index;
		}
		list_add_tail(&dbufs->list, &vdev->tnr.list_wr);
	}

	if (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK) {
		buf = &vdev->tnr.buf.iir;
		buf->size = pic_size;
		ret = rkispp_allow_buffer(dev, buf);
		if (ret < 0)
			goto err;
	}

	buf = &vdev->tnr.buf.gain_kg;
	buf->is_need_vaddr = true;
	buf->is_need_dbuf = true;
	buf->is_need_dmafd = false;
	/* kg gain holds 4x the per-frame gain data */
	buf->size = PAGE_ALIGN(gain_size * 4);
	buf->index = buf_idx++;
	ret = rkispp_allow_buffer(dev, buf);
	if (ret < 0)
		goto err;

	vdev->tnr.is_but_init = true;
	return 0;
err:
	tnr_free_buf(dev);
	v4l2_err(&dev->v4l2_dev, "%s failed\n", __func__);
	return ret;
}
271
/*
 * Configure the TNR (temporal noise reduction) module: compute buffer
 * geometry from the negotiated format, allocate working buffers, and
 * program the TNR base address/stride/size registers. Returns 0 when
 * TNR is disabled or configured successfully, negative errno otherwise.
 */
static int config_tnr(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	int ret, mult = 1;
	u32 width, height, fmt;
	u32 pic_size, gain_size;
	u32 addr_offs, w, h, val;
	u32 max_w, max_h;

	vdev = &dev->stream_vdev;
	vdev->tnr.is_end = true;
	vdev->tnr.is_3to1 =
		((vdev->module_ens & ISPP_MODULE_TNR_3TO1) ==
		 ISPP_MODULE_TNR_3TO1);
	if (!(vdev->module_ens & ISPP_MODULE_TNR))
		return 0;

	if (dev->inp == INP_DDR) {
		/* DDR input reads via the input stream; 3to1 not supported */
		vdev->tnr.is_3to1 = false;
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & (FMT_YUV422 | FMT_FBC);
	}

	width = dev->ispp_sdev.out_fmt.width;
	height = dev->ispp_sdev.out_fmt.height;
	max_w = hw->max_in.w ? hw->max_in.w : width;
	max_h = hw->max_in.h ? hw->max_in.h : height;
	/* FBC compressed frames need 16-aligned dimensions */
	w = (fmt & FMT_FBC) ? ALIGN(max_w, 16) : max_w;
	h = (fmt & FMT_FBC) ? ALIGN(max_h, 16) : max_h;
	/* UV plane offset: FBC uses a header area of w*h/16 bytes */
	addr_offs = (fmt & FMT_FBC) ? w * h >> 4 : w * h;
	/* YUV422 is 2 bytes/pixel, YUV420 is 1.5 bytes/pixel */
	pic_size = (fmt & FMT_YUV422) ? w * h * 2 : w * h * 3 >> 1;
	vdev->tnr.uv_offset = addr_offs;
	if (fmt & FMT_FBC)
		pic_size += w * h >> 4;

	gain_size = ALIGN(width, 64) * ALIGN(height, 128) >> 4;
	if (fmt & FMT_YUYV)
		mult = 2;	/* interleaved YUYV doubles the line stride */

	if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
		ret = tnr_init_buf(dev, pic_size, gain_size);
		if (ret)
			return ret;
		if (dev->inp == INP_ISP &&
		    dev->isp_mode & ISP_ISPP_QUICK) {
			rkispp_set_bits(dev, RKISPP_CTRL_QUICK,
					GLB_QUICK_MODE_MASK,
					GLB_QUICK_MODE(0));

			/* quick mode reads directly from the shared ISP pool */
			val = hw->pool[0].dma[GROUP_BUF_PIC];
			rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
			rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val + addr_offs);

			val = hw->pool[0].dma[GROUP_BUF_GAIN];
			rkispp_write(dev, RKISPP_TNR_GAIN_CUR_Y_BASE, val);

			if (vdev->tnr.is_3to1) {
				val = hw->pool[1].dma[GROUP_BUF_PIC];
				rkispp_write(dev, RKISPP_TNR_NXT_Y_BASE, val);
				rkispp_write(dev, RKISPP_TNR_NXT_UV_BASE, val + addr_offs);
				val = hw->pool[1].dma[GROUP_BUF_GAIN];
				rkispp_write(dev, RKISPP_TNR_GAIN_NXT_Y_BASE, val);
			}
		}

		val = vdev->tnr.buf.gain_kg.dma_addr;
		rkispp_write(dev, RKISPP_TNR_GAIN_KG_Y_BASE, val);

		val = vdev->tnr.buf.wr[0][GROUP_BUF_PIC].dma_addr;
		rkispp_write(dev, RKISPP_TNR_WR_Y_BASE, val);
		rkispp_write(dev, RKISPP_TNR_WR_UV_BASE, val + addr_offs);
		/*
		 * IIR feedback uses its own buffer when allocated (quick
		 * mode), otherwise it shares the write buffer address.
		 */
		if (vdev->tnr.buf.iir.mem_priv)
			val = vdev->tnr.buf.iir.dma_addr;
		rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE, val);
		rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE, val + addr_offs);

		val = vdev->tnr.buf.wr[0][GROUP_BUF_GAIN].dma_addr;
		rkispp_write(dev, RKISPP_TNR_GAIN_WR_Y_BASE, val);

		rkispp_write(dev, RKISPP_TNR_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_WR_MASK, fmt << 4 | SW_TNR_1ST_FRM);
	}

	if (stream) {
		/* DDR input: input stream frame-end follows the TNR unit */
		stream->config->frame_end_id = TNR_INT;
		stream->config->reg.cur_y_base = RKISPP_TNR_CUR_Y_BASE;
		stream->config->reg.cur_uv_base = RKISPP_TNR_CUR_UV_BASE;
		stream->config->reg.cur_y_base_shd = RKISPP_TNR_CUR_Y_BASE_SHD;
		stream->config->reg.cur_uv_base_shd = RKISPP_TNR_CUR_UV_BASE_SHD;
	}

	rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_RD_MASK, fmt);
	if (fmt & FMT_FBC) {
		/* FBC frames carry their own layout; stride registers unused */
		rkispp_write(dev, RKISPP_TNR_CUR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_TNR_IIR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_TNR_NXT_VIR_STRIDE, 0);
	} else {
		rkispp_write(dev, RKISPP_TNR_CUR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_write(dev, RKISPP_TNR_IIR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_write(dev, RKISPP_TNR_NXT_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
	}
	rkispp_set_bits(dev, RKISPP_TNR_CORE_CTRL, SW_TNR_MODE,
			vdev->tnr.is_3to1 ? SW_TNR_MODE : 0);
	rkispp_write(dev, RKISPP_TNR_GAIN_CUR_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_TNR_GAIN_NXT_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_TNR_GAIN_KG_VIR_STRIDE, ALIGN(width, 16) * 6);
	rkispp_write(dev, RKISPP_TNR_GAIN_WR_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_CTRL_TNR_SIZE, height << 16 | width);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.tnr.cmpl);
		schedule_work(&vdev->monitor.tnr.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, width, height,
		 rkispp_read(dev, RKISPP_TNR_CTRL),
		 rkispp_read(dev, RKISPP_TNR_CORE_CTRL));
	return 0;
}
396
/*
 * Release all NR buffer state: read buffers coming from the ISP are
 * returned via s_rx_buffer, TNR-originated descriptors are kfree'd, the
 * write list is drained, and every dummy buffer in vdev->nr.buf is freed.
 */
static void nr_free_buf(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct list_head *list;
	int i;

	list = &vdev->nr.list_rd;
	if (vdev->nr.cur_rd) {
		list_add_tail(&vdev->nr.cur_rd->list, list);
		vdev->nr.cur_rd = NULL;
	}
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		/* ISP-owned buffers go back to the producer; ours are freed */
		if (dbufs->is_isp)
			v4l2_subdev_call(dev->ispp_sdev.remote_sd,
					 video, s_rx_buffer, dbufs, NULL);
		else
			kfree(dbufs);
	}

	list = &vdev->nr.list_wr;
	if (vdev->nr.cur_wr)
		vdev->nr.cur_wr = NULL;
	/* write-list entries are dummy buffers, not rkisp_ispp_buf */
	while (!list_empty(list))
		get_list_buf(list, false);

	/* free every dummy buffer in nr.buf; tmp_yuv is its first member */
	for (i = 0; i < sizeof(vdev->nr.buf) /
	     sizeof(struct rkispp_dummy_buffer); i++)
		rkispp_free_buffer(dev, &vdev->nr.buf.tmp_yuv + i);
}
428
/*
 * Allocate the NR write buffers (only needed when FEC consumes the NR
 * output; one buffer suffices in early-done mode) plus the tmp_yuv
 * scratch buffer used by the sharpening unit. On failure all NR buffers
 * allocated so far are released via nr_free_buf().
 */
static int nr_init_buf(struct rkispp_device *dev, u32 size)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkispp_dummy_buffer *dummy;
	int wr_cnt = 0, tiles, ret, i;

	if (vdev->module_ens & ISPP_MODULE_FEC)
		wr_cnt = vdev->is_done_early ? 1 : RKISPP_BUF_MAX;

	for (i = 0; i < wr_cnt; i++) {
		dummy = &vdev->nr.buf.wr[i];
		dummy->size = size;
		ret = rkispp_allow_buffer(dev, dummy);
		if (ret)
			goto on_err;
		list_add_tail(&dummy->list, &vdev->nr.list_wr);
	}

	/* scratch space: 42 lines of 32 bytes per 32-pixel tile column */
	tiles = DIV_ROUND_UP(dev->ispp_sdev.out_fmt.width, 32);
	dummy = &vdev->nr.buf.tmp_yuv;
	dummy->size = PAGE_ALIGN(tiles * 42 * 32);
	ret = rkispp_allow_buffer(dev, dummy);
	if (ret)
		goto on_err;

	return 0;

on_err:
	nr_free_buf(dev);
	v4l2_err(&dev->v4l2_dev, "%s failed\n", __func__);
	return ret;
}
459
/*
 * Configure the NR (noise reduction) and SHARP modules: derive buffer
 * geometry from the negotiated format, allocate the NR buffers, and wire
 * the NR input to either the TNR output, the shared ISP pool, or the DDR
 * input stream; wire the SHARP output to FEC or the MB stream. Returns 0
 * when NR/SHP are disabled or configured successfully, negative errno
 * otherwise.
 */
static int config_nr_shp(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	u32 width, height, fmt;
	u32 pic_size, addr_offs;
	u32 w, h, val;
	u32 max_w, max_h;
	int ret, mult = 1;

	vdev = &dev->stream_vdev;
	vdev->nr.is_end = true;
	if (!(vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)))
		return 0;

	vdev->is_done_early = is_en_done_early(dev);

	if (dev->inp == INP_DDR) {
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & (FMT_YUV422 | FMT_FBC);
	}

	width = dev->ispp_sdev.out_fmt.width;
	height = dev->ispp_sdev.out_fmt.height;
	w = width;
	h = height;
	max_w = hw->max_in.w ? hw->max_in.w : w;
	max_h = hw->max_in.h ? hw->max_in.h : h;
	if (fmt & FMT_FBC) {
		/* FBC compressed frames need 16-aligned dimensions */
		max_w = ALIGN(max_w, 16);
		max_h = ALIGN(max_h, 16);
		w = ALIGN(w, 16);
		h = ALIGN(h, 16);
	}
	/* UV plane offset: FBC uses a header area of max_w*max_h/16 bytes */
	addr_offs = (fmt & FMT_FBC) ? max_w * max_h >> 4 : max_w * max_h;
	/* YUV422 is 2 bytes/pixel, YUV420 is 1.5 bytes/pixel */
	pic_size = (fmt & FMT_YUV422) ? w * h * 2 : w * h * 3 >> 1;
	vdev->nr.uv_offset = addr_offs;

	if (fmt & FMT_YUYV)
		mult = 2;	/* interleaved YUYV doubles the line stride */

	ret = nr_init_buf(dev, pic_size);
	if (ret)
		return ret;

	if (vdev->module_ens & ISPP_MODULE_TNR) {
		/* NR reads what TNR wrote */
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y,
			     rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV,
			     rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN,
			     rkispp_read(dev, RKISPP_TNR_GAIN_WR_Y_BASE));
		rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_NR_SD32_TNR);
	} else {
		/* tnr need to set same format with nr in the fbc mode */
		rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_RD_MASK, fmt);
		rkispp_write(dev, RKISPP_CTRL_TNR_SIZE, height << 16 | width);
		if (dev->inp == INP_ISP) {
			if (dev->isp_mode & ISP_ISPP_QUICK)
				rkispp_set_bits(dev, RKISPP_CTRL_QUICK,
						GLB_QUICK_MODE_MASK,
						GLB_QUICK_MODE(2));
			else
				rkispp_set_bits(dev, RKISPP_NR_UVNR_CTRL_PARA,
						0, SW_UVNR_SD32_SELF_EN);

			/* NR reads directly from the shared ISP pool */
			val = hw->pool[0].dma[GROUP_BUF_PIC];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val + addr_offs);
			val = hw->pool[0].dma[GROUP_BUF_GAIN];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
			rkispp_clear_bits(dev, RKISPP_CTRL_QUICK, GLB_NR_SD32_TNR);
		} else if (stream) {
			/* DDR input: input stream frame-end follows NR */
			stream->config->frame_end_id = NR_INT;
			stream->config->reg.cur_y_base = RKISPP_NR_ADDR_BASE_Y;
			stream->config->reg.cur_uv_base = RKISPP_NR_ADDR_BASE_UV;
			stream->config->reg.cur_y_base_shd = RKISPP_NR_ADDR_BASE_Y_SHD;
			stream->config->reg.cur_uv_base_shd = RKISPP_NR_ADDR_BASE_UV_SHD;
		}
	}

	rkispp_clear_bits(dev, RKISPP_CTRL_QUICK, GLB_FEC2SCL_EN);
	if (vdev->module_ens & ISPP_MODULE_FEC) {
		/* SHARP writes to the NR buffer that FEC then reads */
		addr_offs = width * height;
		vdev->fec.uv_offset = addr_offs;
		val = vdev->nr.buf.wr[0].dma_addr;
		rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
		rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val + addr_offs);
		rkispp_write(dev, RKISPP_SHARP_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_set_bits(dev, RKISPP_SHARP_CTRL, SW_SHP_WR_FORMAT_MASK, fmt & (~FMT_FBC));

		rkispp_write(dev, RKISPP_FEC_RD_Y_BASE, val);
		rkispp_write(dev, RKISPP_FEC_RD_UV_BASE, val + addr_offs);
	} else {
		/* no FEC: if MB stream is idle, park SHARP on the dummy buffer */
		stream = &vdev->stream[STREAM_MB];
		if (!stream->streaming) {
			val = hw->dummy_buf.dma_addr;
			rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
			rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
			rkispp_write(dev, RKISPP_SHARP_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
			if (dev->inp == INP_ISP)
				rkispp_set_bits(dev, RKISPP_SHARP_CTRL,
						SW_SHP_WR_FORMAT_MASK, FMT_FBC);
		}
	}

	val = vdev->nr.buf.tmp_yuv.dma_addr;
	rkispp_write(dev, RKISPP_SHARP_TMP_YUV_BASE, val);

	/* fix to use new nr algorithm */
	rkispp_set_bits(dev, RKISPP_NR_CTRL, NR_NEW_ALGO, NR_NEW_ALGO);
	rkispp_set_bits(dev, RKISPP_NR_CTRL, FMT_RD_MASK, fmt);
	if (fmt & FMT_FBC) {
		rkispp_write(dev, RKISPP_NR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_FBC_VIR_HEIGHT, max_h);
	} else {
		rkispp_write(dev, RKISPP_NR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
	}
	rkispp_write(dev, RKISPP_NR_VIR_STRIDE_GAIN, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_CTRL_SIZE, height << 16 | width);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.nr.cmpl);
		schedule_work(&vdev->monitor.nr.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d\n"
		 "nr ctrl:0x%x ctrl_para:0x%x\n"
		 "shp ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, width, height,
		 rkispp_read(dev, RKISPP_NR_CTRL),
		 rkispp_read(dev, RKISPP_NR_UVNR_CTRL_PARA),
		 rkispp_read(dev, RKISPP_SHARP_CTRL),
		 rkispp_read(dev, RKISPP_SHARP_CORE_CTRL));
	return 0;
}
599
fec_free_buf(struct rkispp_device * dev)600 static void fec_free_buf(struct rkispp_device *dev)
601 {
602 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
603 struct list_head *list = &vdev->fec.list_rd;
604
605 if (vdev->fec.cur_rd)
606 vdev->fec.cur_rd = NULL;
607 while (!list_empty(list))
608 get_list_buf(list, false);
609 }
610
/*
 * Configure the FEC (fisheye/distortion correction) module: wire its
 * read side to the SHARP output when NR/SHP are enabled, otherwise to
 * the DDR input stream, then program format, stride and size. Returns 0
 * when FEC is disabled or configured successfully.
 */
static int config_fec(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	u32 width, height, fmt, mult = 1;

	vdev = &dev->stream_vdev;
	vdev->fec.is_end = true;
	if (!(vdev->module_ens & ISPP_MODULE_FEC))
		return 0;

	if (dev->inp == INP_DDR) {
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & FMT_YUV422;
	}

	width = dev->ispp_sdev.out_fmt.width;
	height = dev->ispp_sdev.out_fmt.height;

	if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
		/* FEC reads what SHARP wrote */
		rkispp_write(dev, RKISPP_FEC_RD_Y_BASE,
			     rkispp_read(dev, RKISPP_SHARP_WR_Y_BASE));
		rkispp_write(dev, RKISPP_FEC_RD_UV_BASE,
			     rkispp_read(dev, RKISPP_SHARP_WR_UV_BASE));
	} else if (stream) {
		/* DDR input: input stream frame-end follows FEC */
		stream->config->frame_end_id = FEC_INT;
		stream->config->reg.cur_y_base = RKISPP_FEC_RD_Y_BASE;
		stream->config->reg.cur_uv_base = RKISPP_FEC_RD_UV_BASE;
		stream->config->reg.cur_y_base_shd = RKISPP_FEC_RD_Y_BASE_SHD;
		stream->config->reg.cur_uv_base_shd = RKISPP_FEC_RD_UV_BASE_SHD;
	}

	if (fmt & FMT_YUYV)
		mult = 2;	/* interleaved YUYV doubles the line stride */
	rkispp_set_bits(dev, RKISPP_FEC_CTRL, FMT_RD_MASK, fmt);
	rkispp_write(dev, RKISPP_FEC_RD_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
	rkispp_write(dev, RKISPP_FEC_DST_SIZE, height << 16 | width);
	rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_FEC2SCL_EN);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.fec.cmpl);
		schedule_work(&vdev->monitor.fec.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, width, height,
		 rkispp_read(dev, RKISPP_FEC_CTRL),
		 rkispp_read(dev, RKISPP_FEC_CORE_CTRL));
	return 0;
}
663
config_modules(struct rkispp_device * dev)664 static int config_modules(struct rkispp_device *dev)
665 {
666 int ret;
667
668 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
669 "stream module ens:0x%x\n", dev->stream_vdev.module_ens);
670 dev->stream_vdev.monitor.monitoring_module = 0;
671 dev->stream_vdev.monitor.restart_module = 0;
672 dev->stream_vdev.monitor.is_restart = false;
673 dev->stream_vdev.monitor.retry = 0;
674 dev->stream_vdev.monitor.is_en = rkispp_monitor;
675 init_completion(&dev->stream_vdev.monitor.cmpl);
676
677 ret = config_tnr(dev);
678 if (ret < 0)
679 return ret;
680
681 ret = config_nr_shp(dev);
682 if (ret < 0)
683 goto free_tnr;
684
685 ret = config_fec(dev);
686 if (ret < 0)
687 goto free_nr;
688
689 /* config default params */
690 dev->params_vdev.params_ops->rkispp_params_cfg(&dev->params_vdev, 0);
691 return 0;
692 free_nr:
693 nr_free_buf(dev);
694 free_tnr:
695 tnr_free_buf(dev);
696 return ret;
697 }
698
rkispp_destroy_buf(struct rkispp_stream * stream)699 static void rkispp_destroy_buf(struct rkispp_stream *stream)
700 {
701 struct rkispp_device *dev = stream->isppdev;
702 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
703
704 if (atomic_read(&vdev->refcnt) == 1) {
705 vdev->irq_ends = 0;
706 tnr_free_buf(dev);
707 nr_free_buf(dev);
708 fec_free_buf(dev);
709 rkispp_event_handle(dev, CMD_FREE_POOL, NULL);
710 }
711 }
712
713
/*
 * NR/SHP buffer-rotation state machine. Called on three occasions:
 * with a new read buffer from ISP/TNR (@buf_rd), with a recycled write
 * buffer from FEC (@buf_wr), or from the NR frame-end interrupt
 * (@is_isr with both buffers NULL). It rotates cur_rd/cur_wr and the
 * pending lists, programs the NR/SHARP addresses, and kicks the next
 * NR run when both a read buffer and an idle unit are available.
 */
static void nr_work_event(struct rkispp_device *dev,
			  struct rkisp_ispp_buf *buf_rd,
			  struct rkispp_dummy_buffer *buf_wr,
			  bool is_isr)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkispp_stream *stream = &vdev->stream[STREAM_II];
	struct rkispp_monitor *monitor = &vdev->monitor;
	void __iomem *base = dev->hw_dev->base_addr;
	struct rkispp_dummy_buffer *buf_to_fec = NULL;
	struct rkispp_dummy_buffer *dummy;
	struct rkispp_buffer *inbuf;
	struct v4l2_subdev *sd = NULL;
	struct list_head *list;
	struct dma_buf *dbuf;
	unsigned long lock_flags = 0, lock_flags1 = 0;
	bool is_start = false, is_quick = false;
	bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
	struct rkisp_ispp_reg *reg_buf = NULL;
	u32 val;

	if (!(vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)))
		return;

	if (dev->inp == INP_ISP) {
		if (dev->isp_mode & ISP_ISPP_QUICK)
			is_quick = true;
		else
			sd = dev->ispp_sdev.remote_sd;
	}

	spin_lock_irqsave(&vdev->nr.buf_lock, lock_flags);

	/* event from nr frame end */
	if (!buf_rd && !buf_wr && is_isr) {
		vdev->nr.is_end = true;

		if (vdev->nr.cur_rd) {
			/* nr read buf return to isp or tnr */
			if (vdev->nr.cur_rd->is_isp && sd) {
				v4l2_subdev_call(sd, video, s_rx_buffer, vdev->nr.cur_rd, NULL);
			} else if (!vdev->nr.cur_rd->priv) {
				rkispp_module_work_event(dev, NULL, vdev->nr.cur_rd,
							 ISPP_MODULE_TNR, is_isr);
			} else if (stream->streaming && vdev->nr.cur_rd->priv) {
				/* priv set means a user-queued input buffer */
				inbuf = vdev->nr.cur_rd->priv;
				vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
			}
			vdev->nr.cur_rd = NULL;
		}

		if (vdev->nr.cur_wr) {
			/* nr write buf to fec */
			buf_to_fec = vdev->nr.cur_wr;
			vdev->nr.cur_wr = NULL;

			/* in early-done mode FEC was already kicked by timer */
			if (vdev->is_done_early && !dev->hw_dev->is_first)
				buf_to_fec = NULL;
		}
	}

	if (!vdev->fec.is_end) {
		/* FEC still busy: just queue the new read buffer */
		if (buf_rd)
			list_add_tail(&buf_rd->list, &vdev->nr.list_rd);
		goto end;
	}

	spin_lock_irqsave(&monitor->lock, lock_flags1);
	if (monitor->is_restart) {
		/* recovery in progress: park both buffers on their lists */
		if (buf_rd)
			list_add_tail(&buf_rd->list, &vdev->nr.list_rd);
		if (buf_wr)
			list_add_tail(&buf_wr->list, &vdev->nr.list_wr);
		goto restart_unlock;
	}

	list = &vdev->nr.list_rd;
	if (buf_rd && vdev->nr.is_end && list_empty(list)) {
		/* nr read buf from isp or tnr */
		vdev->nr.cur_rd = buf_rd;
	} else if (vdev->nr.is_end && !list_empty(list)) {
		/* nr read buf from list
		 * nr processing slow than isp or tnr
		 * new read buf from isp or tnr into list
		 */
		vdev->nr.cur_rd = get_list_buf(list, true);
		if (buf_rd)
			list_add_tail(&buf_rd->list, list);
	} else if (!vdev->nr.is_end && buf_rd) {
		/* nr no idle
		 * new read buf from isp or tnr into list
		 */
		list_add_tail(&buf_rd->list, list);
	}

	list = &vdev->nr.list_wr;
	if (vdev->nr.is_end && !vdev->nr.cur_wr) {
		/* nr idle, get new write buf */
		vdev->nr.cur_wr = buf_wr ? buf_wr :
				  get_list_buf(list, false);
	} else if (buf_wr) {
		/* tnr no idle, write buf from nr into list */
		list_add_tail(&buf_wr->list, list);
	}

	if (vdev->nr.cur_rd && vdev->nr.is_end) {
		/* program the NR read addresses from whichever source owns
		 * the buffer: user input (priv), TNR dummy buffer, or the
		 * ISP buffer pool
		 */
		if (vdev->nr.cur_rd->priv) {
			inbuf = vdev->nr.cur_rd->priv;
			val = inbuf->buff_addr[RKISPP_PLANE_Y];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
			val = inbuf->buff_addr[RKISPP_PLANE_UV];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);
		} else if (!vdev->nr.cur_rd->is_isp) {
			u32 size = sizeof(vdev->tnr.buf) / sizeof(*dummy);

			dbuf = vdev->nr.cur_rd->dbuf[GROUP_BUF_PIC];
			dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
			val = dummy->dma_addr;
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
			val += vdev->nr.uv_offset;
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);

			dbuf = vdev->nr.cur_rd->dbuf[GROUP_BUF_GAIN];
			dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
			val = dummy->dma_addr;
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
		} else {
			struct rkispp_isp_buf_pool *buf;

			buf = get_pool_buf(dev, vdev->nr.cur_rd);
			val = buf->dma[GROUP_BUF_PIC];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
			val += vdev->nr.uv_offset;
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);

			val = buf->dma[GROUP_BUF_GAIN];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
		}
		is_start = true;
	}

	if (vdev->nr.is_end && is_quick)
		is_start = true;

	if (vdev->nr.cur_wr && is_start) {
		/* program the SHARP write addresses for the output buffer */
		dummy = vdev->nr.cur_wr;
		val = dummy->dma_addr;
		rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
		val += vdev->fec.uv_offset;
		rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
	}

	if (is_start) {
		u32 seq = 0;
		u64 timestamp = 0;

		if (vdev->nr.cur_rd) {
			/* propagate frame id/timestamp to the write buffer */
			seq = vdev->nr.cur_rd->frame_id;
			timestamp = vdev->nr.cur_rd->frame_timestamp;
			if (vdev->nr.cur_wr) {
				vdev->nr.cur_wr->id = seq;
				vdev->nr.cur_wr->timestamp = timestamp;
			} else {
				vdev->nr.buf.wr[0].id = seq;
				vdev->nr.buf.wr[0].timestamp = timestamp;
			}
			if (!is_fec_en && !is_quick) {
				dev->ispp_sdev.frame_timestamp = timestamp;
				dev->ispp_sdev.frm_sync_seq = seq;
			}
		}

		/* check MB config and output buf beforce start, when MB connect to SHARP
		 * MB update by OTHER_FORCE_UPD
		 */
		stream = &vdev->stream[STREAM_MB];
		if (!is_fec_en && stream->streaming) {
			if (!stream->is_cfg) {
				secure_config_mb(stream);
			} else if (!stream->curr_buf) {
				get_stream_buf(stream);
				if (stream->curr_buf)
					vdev->stream_ops->update_mi(stream);
			}
		}

		/* check SCL output buf beforce start
		 * SCL update by OTHER_FORCE_UPD
		 */
		for (val = STREAM_S0; val <= STREAM_S2; val++) {
			stream = &vdev->stream[val];
			if (!stream->streaming || !stream->is_cfg || stream->curr_buf)
				continue;
			get_stream_buf(stream);
			if (stream->curr_buf) {
				vdev->stream_ops->update_mi(stream);
				rkispp_set_bits(dev, stream->config->reg.ctrl, 0, SW_SCL_ENABLE);
			} else {
				/* no buffer available: disable this scaler */
				rkispp_clear_bits(dev, stream->config->reg.ctrl, SW_SCL_ENABLE);
			}
		}

		if (!dev->hw_dev->is_single) {
			/* multi-device: replay the shadowed register state */
			if (vdev->nr.cur_rd &&
			    (vdev->nr.cur_rd->is_isp || vdev->nr.cur_rd->priv)) {
				rkispp_update_regs(dev, RKISPP_CTRL, RKISPP_TNR_CTRL);
				writel(TNR_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
			}
			rkispp_update_regs(dev, RKISPP_NR, RKISPP_ORB_MAX_FEATURE);
		}

		writel(OTHER_FORCE_UPD, base + RKISPP_CTRL_UPDATE);

		val = readl(base + RKISPP_SHARP_CORE_CTRL);
		if (!(val & SW_SHP_EN) && !is_fec_en && !stream->streaming)
			writel(val | SW_SHP_DMA_DIS, base + RKISPP_SHARP_CORE_CTRL);
		else if (val & SW_SHP_EN)
			writel(val & ~SW_SHP_DMA_DIS, base + RKISPP_SHARP_CORE_CTRL);

		v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
			 "NR start seq:%d | Y_SHD rd:0x%x wr:0x%x\n",
			 seq, readl(base + RKISPP_NR_ADDR_BASE_Y_SHD),
			 readl(base + RKISPP_SHARP_WR_Y_BASE_SHD));

		for (val = STREAM_S0; val <= STREAM_S2 && !is_fec_en; val++) {
			stream = &vdev->stream[val];
			/* check scale stream stop state */
			if (stream->streaming && stream->stopping) {
				if (stream->ops->is_stopped(stream)) {
					stream->stopping = false;
					stream->streaming = false;
					wake_up(&stream->done);
				} else {
					stream->ops->stop(stream);
				}
			}
		}

		vdev->nr.dbg.id = seq;
		vdev->nr.dbg.timestamp = ktime_get_ns();
		if (monitor->is_en) {
			/* arm the hang monitor for this NR run */
			monitor->nr.time = vdev->nr.dbg.interval / 1000 / 1000;
			monitor->monitoring_module |= MONITOR_NR;
			monitor->nr.is_err = false;
			if (!completion_done(&monitor->nr.cmpl))
				complete(&monitor->nr.cmpl);
		}

		/* optionally snapshot NR/SHP/ORB registers for debug dumps */
		if (rkispp_is_reg_withstream_global())
			rkispp_find_regbuf_by_id(dev, &reg_buf, dev->dev_id, seq);
		if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_NR)) {
			u32 offset, size;

			offset = reg_buf->reg_size;
			size = 4 + RKISPP_NR_BUFFER_READY - RKISPP_NR_CTRL;
			reg_buf->ispp_size[ISPP_ID_NR] = size;
			reg_buf->ispp_offset[ISPP_ID_NR] = offset;
			memcpy_fromio(&reg_buf->reg[offset], base + RKISPP_NR_CTRL, size);

			offset += size;
			reg_buf->reg_size = offset;
		}
		if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_SHP)) {
			u32 offset, size;

			offset = reg_buf->reg_size;
			size = 4 + RKISPP_SHARP_GRAD_RATIO - RKISPP_SHARP_CTRL;
			reg_buf->ispp_size[ISPP_ID_SHP] = size;
			reg_buf->ispp_offset[ISPP_ID_SHP] = offset;
			memcpy_fromio(&reg_buf->reg[offset], base + RKISPP_SHARP_CTRL, size);

			offset += size;
			reg_buf->reg_size = offset;
		}
		if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_ORB)) {
			u32 offset, size;

			offset = reg_buf->reg_size;
			size = 4 + RKISPP_ORB_MAX_FEATURE - RKISPP_ORB_WR_BASE;
			reg_buf->ispp_size[ISPP_ID_ORB] = size;
			reg_buf->ispp_offset[ISPP_ID_ORB] = offset;
			memcpy_fromio(&reg_buf->reg[offset], base + RKISPP_ORB_WR_BASE, size);

			offset += size;
			reg_buf->reg_size = offset;
		}

		if (!is_quick && !dev->hw_dev->is_shutdown) {
			/* kick the NR/SHP hardware */
			writel(NR_SHP_ST, base + RKISPP_CTRL_STRT);

			if (!is_fec_en && vdev->is_done_early)
				hrtimer_start(&vdev->frame_qst,
					      ns_to_ktime(1000000),
					      HRTIMER_MODE_REL);
		}
		vdev->nr.is_end = false;
	}
restart_unlock:
	spin_unlock_irqrestore(&monitor->lock, lock_flags1);
end:
	/* nr_shp->fec->scl
	 * fec start working should after nr
	 * for scl will update by OTHER_FORCE_UPD
	 */
	if (buf_to_fec)
		rkispp_module_work_event(dev, buf_to_fec, NULL,
					 ISPP_MODULE_FEC, is_isr);
	spin_unlock_irqrestore(&vdev->nr.buf_lock, lock_flags);

	if (is_fec_en && vdev->is_done_early &&
	    is_start && !dev->hw_dev->is_first)
		hrtimer_start(&vdev->fec_qst,
			      ns_to_ktime(1000000),
			      HRTIMER_MODE_REL);
}
1029
tnr_work_event(struct rkispp_device * dev,struct rkisp_ispp_buf * buf_rd,struct rkisp_ispp_buf * buf_wr,bool is_isr)1030 static void tnr_work_event(struct rkispp_device *dev,
1031 struct rkisp_ispp_buf *buf_rd,
1032 struct rkisp_ispp_buf *buf_wr,
1033 bool is_isr)
1034 {
1035 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1036 struct rkispp_stream *stream = &vdev->stream[STREAM_II];
1037 struct rkispp_monitor *monitor = &vdev->monitor;
1038 void __iomem *base = dev->hw_dev->base_addr;
1039 struct rkispp_dummy_buffer *dummy;
1040 struct rkispp_buffer *inbuf;
1041 struct v4l2_subdev *sd = NULL;
1042 struct list_head *list;
1043 struct dma_buf *dbuf;
1044 unsigned long lock_flags = 0, lock_flags1 = 0;
1045 u32 val, size = sizeof(vdev->tnr.buf) / sizeof(*dummy);
1046 bool is_3to1 = vdev->tnr.is_3to1, is_start = false;
1047 bool is_en = rkispp_read(dev, RKISPP_TNR_CORE_CTRL) & SW_TNR_EN;
1048 struct rkisp_ispp_reg *reg_buf = NULL;
1049
1050 if (!(vdev->module_ens & ISPP_MODULE_TNR) ||
1051 (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK))
1052 return;
1053
1054 if (dev->inp == INP_ISP)
1055 sd = dev->ispp_sdev.remote_sd;
1056
1057 spin_lock_irqsave(&vdev->tnr.buf_lock, lock_flags);
1058
1059 /* event from tnr frame end */
1060 if (!buf_rd && !buf_wr && is_isr) {
1061 vdev->tnr.is_end = true;
1062
1063 if (vdev->tnr.cur_rd) {
1064 /* tnr read buf return to isp */
1065 if (sd) {
1066 v4l2_subdev_call(sd, video, s_rx_buffer, vdev->tnr.cur_rd, NULL);
1067 } else if (stream->streaming && vdev->tnr.cur_rd->priv) {
1068 inbuf = vdev->tnr.cur_rd->priv;
1069 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1070 }
1071 if (vdev->tnr.cur_rd == vdev->tnr.nxt_rd)
1072 vdev->tnr.nxt_rd = NULL;
1073 vdev->tnr.cur_rd = NULL;
1074 }
1075
1076 if (vdev->tnr.cur_wr) {
1077 struct rkispp_tnr_inf tnr_inf;
1078
1079 if (!vdev->tnr.cur_wr->is_move_judge || !vdev->tnr.is_trigger) {
1080 /* tnr write buf to nr */
1081 rkispp_module_work_event(dev, vdev->tnr.cur_wr, NULL,
1082 ISPP_MODULE_NR, is_isr);
1083 } else {
1084 tnr_inf.dev_id = dev->dev_id;
1085 tnr_inf.frame_id = vdev->tnr.cur_wr->frame_id;
1086 tnr_inf.gainkg_idx = vdev->tnr.buf.gain_kg.index;
1087 tnr_inf.gainwr_idx = vdev->tnr.cur_wr->didx[GROUP_BUF_GAIN];
1088 tnr_inf.gainkg_size = vdev->tnr.buf.gain_kg.size;
1089 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_GAIN];
1090 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1091 tnr_inf.gainwr_size = dummy->size;
1092 rkispp_finish_buffer(dev, dummy);
1093 rkispp_finish_buffer(dev, &vdev->tnr.buf.gain_kg);
1094 rkispp_tnr_complete(dev, &tnr_inf);
1095 list_add_tail(&vdev->tnr.cur_wr->list, &vdev->tnr.list_rpt);
1096 }
1097 vdev->tnr.cur_wr = NULL;
1098 }
1099 }
1100
1101 if (!is_en) {
1102 if (buf_wr)
1103 list_add_tail(&buf_wr->list, &vdev->tnr.list_wr);
1104
1105 if (vdev->tnr.nxt_rd) {
1106 if (sd) {
1107 v4l2_subdev_call(sd, video, s_rx_buffer,
1108 vdev->tnr.nxt_rd, NULL);
1109 } else if (stream->streaming && vdev->tnr.nxt_rd->priv) {
1110 inbuf = vdev->tnr.nxt_rd->priv;
1111 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1112 }
1113 vdev->tnr.nxt_rd = NULL;
1114 }
1115 list = &vdev->tnr.list_rd;
1116 while (!list_empty(list)) {
1117 struct rkisp_ispp_buf *buf = get_list_buf(list, true);
1118
1119 rkispp_module_work_event(dev, buf, NULL,
1120 ISPP_MODULE_NR, is_isr);
1121 }
1122 if (buf_rd)
1123 rkispp_module_work_event(dev, buf_rd, NULL,
1124 ISPP_MODULE_NR, is_isr);
1125 goto end;
1126 }
1127
1128 spin_lock_irqsave(&monitor->lock, lock_flags1);
1129 if (monitor->is_restart) {
1130 if (buf_rd)
1131 list_add_tail(&buf_rd->list, &vdev->tnr.list_rd);
1132 if (buf_wr)
1133 list_add_tail(&buf_wr->list, &vdev->tnr.list_wr);
1134 goto restart_unlock;
1135 }
1136
1137 list = &vdev->tnr.list_rd;
1138 if (buf_rd && vdev->tnr.is_end && list_empty(list)) {
1139 /* tnr read buf from isp */
1140 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1141 vdev->tnr.nxt_rd = buf_rd;
1142 /* first buf for 3to1 using twice */
1143 if (!is_3to1 ||
1144 (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM))
1145 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1146 } else if (vdev->tnr.is_end && !list_empty(list)) {
1147 /* tnr read buf from list
1148 * tnr processing slow than isp
1149 * new read buf from isp into list
1150 */
1151 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1152 vdev->tnr.nxt_rd = get_list_buf(list, true);
1153 if (!is_3to1)
1154 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1155
1156 if (buf_rd)
1157 list_add_tail(&buf_rd->list, list);
1158 } else if (!vdev->tnr.is_end && buf_rd) {
1159 /* tnr no idle
1160 * new read buf from isp into list
1161 */
1162 list_add_tail(&buf_rd->list, list);
1163 }
1164
1165 list = &vdev->tnr.list_wr;
1166 if (vdev->tnr.is_end && !vdev->tnr.cur_wr) {
1167 /* tnr idle, get new write buf */
1168 vdev->tnr.cur_wr =
1169 buf_wr ? buf_wr : get_list_buf(list, true);
1170 } else if (buf_wr) {
1171 /* tnr no idle, write buf from nr into list */
1172 list_add_tail(&buf_wr->list, list);
1173 }
1174
1175 if (vdev->tnr.cur_rd && vdev->tnr.nxt_rd && vdev->tnr.is_end) {
1176 if (vdev->tnr.cur_rd->priv) {
1177 inbuf = vdev->tnr.cur_rd->priv;
1178 val = inbuf->buff_addr[RKISPP_PLANE_Y];
1179 rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
1180 val = inbuf->buff_addr[RKISPP_PLANE_UV];
1181 rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val);
1182 } else {
1183 struct rkispp_isp_buf_pool *buf;
1184
1185 buf = get_pool_buf(dev, vdev->tnr.cur_rd);
1186 val = buf->dma[GROUP_BUF_PIC];
1187 rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
1188 val += vdev->tnr.uv_offset;
1189 rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val);
1190
1191 val = buf->dma[GROUP_BUF_GAIN];
1192 rkispp_write(dev, RKISPP_TNR_GAIN_CUR_Y_BASE, val);
1193 if (is_3to1) {
1194 buf = get_pool_buf(dev, vdev->tnr.nxt_rd);
1195 val = buf->dma[GROUP_BUF_PIC];
1196 rkispp_write(dev, RKISPP_TNR_NXT_Y_BASE, val);
1197 val += vdev->tnr.uv_offset;
1198 rkispp_write(dev, RKISPP_TNR_NXT_UV_BASE, val);
1199
1200 val = buf->dma[GROUP_BUF_GAIN];
1201 rkispp_write(dev, RKISPP_TNR_GAIN_NXT_Y_BASE, val);
1202
1203 if (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM)
1204 vdev->tnr.cur_rd = NULL;
1205 }
1206 }
1207 is_start = true;
1208 }
1209
1210 if (vdev->tnr.cur_wr && is_start) {
1211 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_PIC];
1212 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1213 val = dummy->dma_addr;
1214 rkispp_write(dev, RKISPP_TNR_WR_Y_BASE, val);
1215 val += vdev->tnr.uv_offset;
1216 rkispp_write(dev, RKISPP_TNR_WR_UV_BASE, val);
1217
1218 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_GAIN];
1219 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1220 val = dummy->dma_addr;
1221 rkispp_write(dev, RKISPP_TNR_GAIN_WR_Y_BASE, val);
1222 }
1223
1224 if (is_start) {
1225 u32 seq = 0;
1226
1227 if (vdev->tnr.nxt_rd) {
1228 seq = vdev->tnr.nxt_rd->frame_id;
1229 if (vdev->tnr.cur_wr) {
1230 vdev->tnr.cur_wr->frame_id = seq;
1231 vdev->tnr.cur_wr->frame_timestamp =
1232 vdev->tnr.nxt_rd->frame_timestamp;
1233 vdev->tnr.cur_wr->is_move_judge =
1234 vdev->tnr.nxt_rd->is_move_judge;
1235 }
1236 }
1237
1238 if (!dev->hw_dev->is_single)
1239 rkispp_update_regs(dev, RKISPP_CTRL, RKISPP_TNR_CORE_WEIGHT);
1240 writel(TNR_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1241
1242 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1243 "TNR start seq:%d | Y_SHD nxt:0x%x cur:0x%x iir:0x%x wr:0x%x\n",
1244 seq, readl(base + RKISPP_TNR_NXT_Y_BASE_SHD),
1245 readl(base + RKISPP_TNR_CUR_Y_BASE_SHD),
1246 readl(base + RKISPP_TNR_IIR_Y_BASE_SHD),
1247 readl(base + RKISPP_TNR_WR_Y_BASE_SHD));
1248
1249 /* iir using previous tnr write frame */
1250 rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1251 rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
1252 rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1253 rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
1254
1255 rkispp_prepare_buffer(dev, &vdev->tnr.buf.gain_kg);
1256
1257 vdev->tnr.dbg.id = seq;
1258 vdev->tnr.dbg.timestamp = ktime_get_ns();
1259 if (monitor->is_en) {
1260 monitor->tnr.time = vdev->tnr.dbg.interval / 1000 / 1000;
1261 monitor->monitoring_module |= MONITOR_TNR;
1262 monitor->tnr.is_err = false;
1263 if (!completion_done(&monitor->tnr.cmpl))
1264 complete(&monitor->tnr.cmpl);
1265 }
1266
1267 if (rkispp_is_reg_withstream_global())
1268 rkispp_find_regbuf_by_id(dev, ®_buf, dev->dev_id, seq);
1269 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_TNR)) {
1270 u32 offset, size;
1271
1272 offset = reg_buf->reg_size;
1273 size = 4 + RKISPP_TNR_STATE - RKISPP_TNR_CTRL;
1274 reg_buf->ispp_size[ISPP_ID_TNR] = size;
1275 reg_buf->ispp_offset[ISPP_ID_TNR] = offset;
1276 memcpy_fromio(®_buf->reg[offset], base + RKISPP_TNR_CTRL, size);
1277
1278 offset += size;
1279 reg_buf->reg_size = offset;
1280 }
1281
1282 if (!dev->hw_dev->is_shutdown)
1283 writel(TNR_ST, base + RKISPP_CTRL_STRT);
1284 vdev->tnr.is_end = false;
1285 }
1286
1287 restart_unlock:
1288 spin_unlock_irqrestore(&monitor->lock, lock_flags1);
1289 end:
1290 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1291 }
1292
fec_work_event(struct rkispp_device * dev,void * buff_rd,bool is_isr,bool is_quick)1293 static void fec_work_event(struct rkispp_device *dev,
1294 void *buff_rd,
1295 bool is_isr, bool is_quick)
1296 {
1297 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1298 struct rkispp_monitor *monitor = &vdev->monitor;
1299 struct list_head *list = &vdev->fec.list_rd;
1300 void __iomem *base = dev->hw_dev->base_addr;
1301 struct rkispp_dummy_buffer *dummy;
1302 struct rkispp_stream *stream;
1303 unsigned long lock_flags = 0, lock_flags1 = 0;
1304 bool is_start = false;
1305 struct rkisp_ispp_reg *reg_buf = NULL;
1306 u32 val;
1307 struct rkispp_dummy_buffer *buf_rd = buff_rd;
1308
1309 if (!(vdev->module_ens & ISPP_MODULE_FEC))
1310 return;
1311
1312 spin_lock_irqsave(&vdev->fec.buf_lock, lock_flags);
1313
1314 /* event from fec frame end */
1315 if (!buf_rd && is_isr) {
1316 vdev->fec.is_end = true;
1317
1318 if (vdev->fec.dummy_cur_rd || vdev->is_done_early)
1319 rkispp_module_work_event(dev, NULL, vdev->fec.dummy_cur_rd,
1320 ISPP_MODULE_NR, false);
1321 vdev->fec.dummy_cur_rd = NULL;
1322 }
1323
1324 spin_lock_irqsave(&monitor->lock, lock_flags1);
1325 if (monitor->is_restart && buf_rd) {
1326 list_add_tail(&buf_rd->list, list);
1327 goto restart_unlock;
1328 }
1329
1330 if (buf_rd && vdev->fec.is_end && list_empty(list)) {
1331 /* fec read buf from nr */
1332 vdev->fec.dummy_cur_rd = buf_rd;
1333 } else if (vdev->fec.is_end && !list_empty(list)) {
1334 /* fec read buf from list
1335 * fec processing slow than nr
1336 * new read buf from nr into list
1337 */
1338 vdev->fec.dummy_cur_rd = get_list_buf(list, false);
1339 if (buf_rd)
1340 list_add_tail(&buf_rd->list, list);
1341 } else if (!vdev->fec.is_end && buf_rd) {
1342 /* fec no idle
1343 * new read buf from nr into list
1344 */
1345 list_add_tail(&buf_rd->list, list);
1346 }
1347
1348 if (vdev->fec.dummy_cur_rd && vdev->fec.is_end) {
1349 dummy = vdev->fec.dummy_cur_rd;
1350 val = dummy->dma_addr;
1351 rkispp_write(dev, RKISPP_FEC_RD_Y_BASE, val);
1352 val += vdev->fec.uv_offset;
1353 rkispp_write(dev, RKISPP_FEC_RD_UV_BASE, val);
1354 is_start = true;
1355 }
1356
1357 if (is_start || is_quick) {
1358 u32 seq = 0;
1359
1360 if (vdev->fec.dummy_cur_rd) {
1361 seq = vdev->fec.dummy_cur_rd->id;
1362 dev->ispp_sdev.frame_timestamp =
1363 vdev->fec.dummy_cur_rd->timestamp;
1364 dev->ispp_sdev.frm_sync_seq = seq;
1365 } else {
1366 seq = vdev->nr.buf.wr[0].id;
1367 dev->ispp_sdev.frame_timestamp =
1368 vdev->nr.buf.wr[0].timestamp;
1369 dev->ispp_sdev.frm_sync_seq = seq;
1370 }
1371
1372 /* check MB config and output buf beforce start, when MB connect to FEC
1373 * MB update by FEC_FORCE_UPD
1374 */
1375 stream = &vdev->stream[STREAM_MB];
1376 if (stream->streaming) {
1377 if (!stream->is_cfg) {
1378 secure_config_mb(stream);
1379 } else if (!stream->curr_buf) {
1380 get_stream_buf(stream);
1381 if (stream->curr_buf)
1382 update_mi(stream);
1383 }
1384 }
1385
1386 if (!dev->hw_dev->is_single)
1387 rkispp_update_regs(dev, RKISPP_FEC, RKISPP_FEC_CROP);
1388 writel(FEC_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1389 if (vdev->nr.is_end) {
1390 if (!dev->hw_dev->is_single)
1391 rkispp_update_regs(dev, RKISPP_SCL0_CTRL, RKISPP_SCL2_FACTOR);
1392 writel(OTHER_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1393 /* check scale stream stop state */
1394 for (val = STREAM_S0; val <= STREAM_S2; val++) {
1395 stream = &vdev->stream[val];
1396 if (stream->streaming && stream->stopping) {
1397 if (stream->ops->is_stopped(stream)) {
1398 stream->stopping = false;
1399 stream->streaming = false;
1400 wake_up(&stream->done);
1401 } else {
1402 stream->ops->stop(stream);
1403 }
1404 }
1405 }
1406 }
1407 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1408 "FEC start seq:%d | Y_SHD rd:0x%x\n"
1409 "\txint:0x%x xfra:0x%x yint:0x%x yfra:0x%x\n",
1410 seq, readl(base + RKISPP_FEC_RD_Y_BASE_SHD),
1411 readl(base + RKISPP_FEC_MESH_XINT_BASE_SHD),
1412 readl(base + RKISPP_FEC_MESH_XFRA_BASE_SHD),
1413 readl(base + RKISPP_FEC_MESH_YINT_BASE_SHD),
1414 readl(base + RKISPP_FEC_MESH_YFRA_BASE_SHD));
1415
1416 vdev->fec.dbg.id = seq;
1417 vdev->fec.dbg.timestamp = ktime_get_ns();
1418 if (monitor->is_en) {
1419 monitor->fec.time = vdev->fec.dbg.interval / 1000 / 1000;
1420 monitor->monitoring_module |= MONITOR_FEC;
1421 if (!completion_done(&monitor->fec.cmpl))
1422 complete(&monitor->fec.cmpl);
1423 }
1424
1425 if (rkispp_is_reg_withstream_global())
1426 rkispp_find_regbuf_by_id(dev, ®_buf, dev->dev_id, seq);
1427 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_FEC)) {
1428 u32 offset, size;
1429
1430 offset = reg_buf->reg_size;
1431 size = 4 + RKISPP_FEC_CROP - RKISPP_FEC_CTRL;
1432 reg_buf->ispp_size[ISPP_ID_FEC] = size;
1433 reg_buf->ispp_offset[ISPP_ID_FEC] = offset;
1434 memcpy_fromio(®_buf->reg[offset], base + RKISPP_FEC_CTRL, size);
1435
1436 offset += size;
1437 reg_buf->reg_size = offset;
1438 }
1439
1440 if (!dev->hw_dev->is_shutdown) {
1441 writel(FEC_ST, base + RKISPP_CTRL_STRT);
1442
1443 if (vdev->is_done_early)
1444 hrtimer_start(&vdev->frame_qst,
1445 ns_to_ktime(5000000),
1446 HRTIMER_MODE_REL);
1447 }
1448 vdev->fec.is_end = false;
1449 }
1450 restart_unlock:
1451 spin_unlock_irqrestore(&monitor->lock, lock_flags1);
1452 spin_unlock_irqrestore(&vdev->fec.buf_lock, lock_flags);
1453 }
1454
1455
rkispp_sendbuf_to_nr(struct rkispp_device * dev,struct rkispp_tnr_inf * tnr_inf)1456 void rkispp_sendbuf_to_nr(struct rkispp_device *dev,
1457 struct rkispp_tnr_inf *tnr_inf)
1458 {
1459 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1460 struct rkispp_dummy_buffer *dummy;
1461 struct rkisp_ispp_buf *cur_buf;
1462 unsigned long lock_flags = 0;
1463 bool find_flg = false;
1464 struct dma_buf *dbuf;
1465 u32 size;
1466
1467 size = sizeof(vdev->tnr.buf) / sizeof(*dummy);
1468 spin_lock_irqsave(&vdev->tnr.buf_lock, lock_flags);
1469 list_for_each_entry(cur_buf, &vdev->tnr.list_rpt, list) {
1470 if (cur_buf->index == tnr_inf->dev_id &&
1471 cur_buf->didx[GROUP_BUF_GAIN] == tnr_inf->gainwr_idx) {
1472 find_flg = true;
1473 break;
1474 }
1475 }
1476
1477 if (find_flg) {
1478 list_del(&cur_buf->list);
1479
1480 dbuf = cur_buf->dbuf[GROUP_BUF_GAIN];
1481 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1482 rkispp_prepare_buffer(dev, dummy);
1483
1484 /* tnr write buf to nr */
1485 rkispp_module_work_event(dev, cur_buf, NULL,
1486 ISPP_MODULE_NR, false);
1487 }
1488 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1489 }
1490
rkispp_set_trigger_mode(struct rkispp_device * dev,struct rkispp_trigger_mode * mode)1491 void rkispp_set_trigger_mode(struct rkispp_device *dev,
1492 struct rkispp_trigger_mode *mode)
1493 {
1494 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1495
1496 if (mode->module & ISPP_MODULE_TNR)
1497 vdev->tnr.is_trigger = mode->on;
1498 }
1499
rkispp_get_tnrbuf_fd(struct rkispp_device * dev,struct rkispp_buf_idxfd * idxfd)1500 int rkispp_get_tnrbuf_fd(struct rkispp_device *dev, struct rkispp_buf_idxfd *idxfd)
1501 {
1502 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1503 struct rkisp_ispp_buf *dbufs;
1504 struct rkispp_dummy_buffer *buf;
1505 unsigned long lock_flags = 0;
1506 int j, buf_idx, ret = 0;
1507
1508 spin_lock_irqsave(&vdev->tnr.buf_lock, lock_flags);
1509 if (!vdev->tnr.is_but_init) {
1510 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1511 ret = -EAGAIN;
1512 return ret;
1513 }
1514 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1515
1516 buf_idx = 0;
1517 list_for_each_entry(dbufs, &vdev->tnr.list_wr, list) {
1518 for (j = 0; j < GROUP_BUF_MAX; j++) {
1519 dbufs->dfd[j] = dma_buf_fd(dbufs->dbuf[j], O_CLOEXEC);
1520 get_dma_buf(dbufs->dbuf[j]);
1521 idxfd->index[buf_idx] = dbufs->didx[j];
1522 idxfd->dmafd[buf_idx] = dbufs->dfd[j];
1523 buf_idx++;
1524 }
1525 }
1526
1527 list_for_each_entry(dbufs, &vdev->tnr.list_rpt, list) {
1528 for (j = 0; j < GROUP_BUF_MAX; j++) {
1529 dbufs->dfd[j] = dma_buf_fd(dbufs->dbuf[j], O_CLOEXEC);
1530 get_dma_buf(dbufs->dbuf[j]);
1531 idxfd->index[buf_idx] = dbufs->didx[j];
1532 idxfd->dmafd[buf_idx] = dbufs->dfd[j];
1533 buf_idx++;
1534 }
1535 }
1536
1537 if (vdev->tnr.cur_wr) {
1538 for (j = 0; j < GROUP_BUF_MAX; j++) {
1539 vdev->tnr.cur_wr->dfd[j] = dma_buf_fd(vdev->tnr.cur_wr->dbuf[j], O_CLOEXEC);
1540 get_dma_buf(vdev->tnr.cur_wr->dbuf[j]);
1541 idxfd->index[buf_idx] = vdev->tnr.cur_wr->didx[j];
1542 idxfd->dmafd[buf_idx] = vdev->tnr.cur_wr->dfd[j];
1543 buf_idx++;
1544 }
1545 }
1546
1547 buf = &vdev->tnr.buf.gain_kg;
1548 buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
1549 get_dma_buf(buf->dbuf);
1550 idxfd->index[buf_idx] = buf->index;
1551 idxfd->dmafd[buf_idx] = buf->dma_fd;
1552 buf_idx++;
1553
1554 idxfd->buf_num = buf_idx;
1555
1556 return ret;
1557 }
1558
rkispp_module_work_event(struct rkispp_device * dev,void * buf_rd,void * buf_wr,u32 module,bool is_isr)1559 static void rkispp_module_work_event(struct rkispp_device *dev,
1560 void *buf_rd, void *buf_wr,
1561 u32 module, bool is_isr)
1562 {
1563 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1564 bool is_fec_en = !!(vdev->module_ens & ISPP_MODULE_FEC);
1565 bool is_single = dev->hw_dev->is_single;
1566 //bool is_early = vdev->is_done_early;
1567
1568 if (dev->hw_dev->is_shutdown)
1569 return;
1570
1571 if (dev->ispp_sdev.state != ISPP_STOP) {
1572 if (module & ISPP_MODULE_TNR)
1573 tnr_work_event(dev, buf_rd, buf_wr, is_isr);
1574 else if (module & ISPP_MODULE_NR)
1575 nr_work_event(dev, buf_rd, buf_wr, is_isr);
1576 else
1577 fec_work_event(dev, buf_rd, is_isr, false);
1578 }
1579
1580 /*
1581 * ispp frame done to do next conditions
1582 * mulit dev: cur frame (tnr->nr->fec) done for next frame
1583 * 1.single dev: fec async with tnr, and sync with nr:
1584 * { f0 }
1585 * tnr->nr->fec->|
1586 * |->tnr->nr->fec
1587 * { f1 }
1588 * 2.single dev and early mode:
1589 * { f0 } { f1 } { f2 }
1590 * tnr->nr->tnr->nr->tnr->nr
1591 * |->fec->||->fec->|
1592 * { f0 }{ f1 }
1593 * 3.single fec
1594 *
1595 */
1596 if (is_isr && !buf_rd && !buf_wr &&
1597 ((!is_fec_en && module == ISPP_MODULE_NR) ||
1598 (is_fec_en &&
1599 ((module == ISPP_MODULE_NR && is_single) ||
1600 (module == ISPP_MODULE_FEC && !is_single))))) {
1601 dev->stream_vdev.monitor.retry = 0;
1602 rkispp_soft_reset(dev->hw_dev);
1603 rkispp_event_handle(dev, CMD_QUEUE_DMABUF, NULL);
1604 }
1605
1606 if (dev->ispp_sdev.state == ISPP_STOP) {
1607 if ((module & (ISPP_MODULE_TNR | ISPP_MODULE_NR)) && buf_rd) {
1608 struct rkisp_ispp_buf *buf = buf_rd;
1609
1610 if (buf->is_isp)
1611 v4l2_subdev_call(dev->ispp_sdev.remote_sd,
1612 video, s_rx_buffer, buf, NULL);
1613 }
1614 if (!dev->hw_dev->is_idle)
1615 dev->hw_dev->is_idle = true;
1616 }
1617 }
1618
start_isp(struct rkispp_device * dev)1619 static int start_isp(struct rkispp_device *dev)
1620 {
1621 struct rkispp_subdev *ispp_sdev = &dev->ispp_sdev;
1622 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1623 struct rkispp_stream *stream;
1624 struct rkisp_ispp_mode mode;
1625 int i, ret;
1626
1627 if (dev->inp != INP_ISP || ispp_sdev->state)
1628 return 0;
1629
1630 if (dev->stream_sync) {
1631 /* output stream enable then start isp */
1632 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1633 stream = &vdev->stream[i];
1634 if (stream->linked && !stream->streaming)
1635 return 0;
1636 }
1637 } else if (atomic_read(&vdev->refcnt) > 1) {
1638 return 0;
1639 }
1640
1641 rkispp_start_3a_run(dev);
1642
1643 mutex_lock(&dev->hw_dev->dev_lock);
1644
1645 mode.work_mode = dev->isp_mode;
1646 mode.buf_num = ((vdev->module_ens & ISPP_MODULE_TNR_3TO1) ==
1647 ISPP_MODULE_TNR_3TO1) ? 2 : 1;
1648 mode.buf_num += RKISP_BUF_MAX + 2 * (dev->hw_dev->dev_num - 1);
1649 ret = v4l2_subdev_call(ispp_sdev->remote_sd, core, ioctl,
1650 RKISP_ISPP_CMD_SET_MODE, &mode);
1651 if (ret)
1652 goto err;
1653
1654 ret = config_modules(dev);
1655 if (ret) {
1656 rkispp_event_handle(dev, CMD_FREE_POOL, NULL);
1657 mode.work_mode = ISP_ISPP_INIT_FAIL;
1658 v4l2_subdev_call(ispp_sdev->remote_sd, core, ioctl,
1659 RKISP_ISPP_CMD_SET_MODE, &mode);
1660 goto err;
1661 }
1662 if (dev->hw_dev->is_single)
1663 writel(ALL_FORCE_UPD, dev->hw_dev->base_addr + RKISPP_CTRL_UPDATE);
1664 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1665 stream = &vdev->stream[i];
1666 if (stream->streaming)
1667 stream->is_upd = true;
1668 }
1669 if (dev->isp_mode & ISP_ISPP_QUICK)
1670 rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_QUICK_EN);
1671
1672 dev->isr_cnt = 0;
1673 dev->isr_err_cnt = 0;
1674 ret = v4l2_subdev_call(&ispp_sdev->sd, video, s_stream, true);
1675 err:
1676 mutex_unlock(&dev->hw_dev->dev_lock);
1677 return ret;
1678 }
1679
check_to_force_update(struct rkispp_device * dev,u32 mis_val)1680 static void check_to_force_update(struct rkispp_device *dev, u32 mis_val)
1681 {
1682 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1683 struct rkispp_stream *stream;
1684 u32 i, mask = NR_INT | SHP_INT;
1685 bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
1686
1687 if (mis_val & TNR_INT)
1688 rkispp_module_work_event(dev, NULL, NULL,
1689 ISPP_MODULE_TNR, true);
1690 if (mis_val & FEC_INT)
1691 rkispp_module_work_event(dev, NULL, NULL,
1692 ISPP_MODULE_FEC, true);
1693
1694 /* wait nr_shp/fec/scl idle */
1695 for (i = STREAM_S0; i <= STREAM_S2; i++) {
1696 stream = &vdev->stream[i];
1697 if (stream->is_upd && !is_fec_en &&
1698 rkispp_read(dev, stream->config->reg.ctrl) & SW_SCL_ENABLE)
1699 mask |= stream->config->frame_end_id;
1700 }
1701
1702 vdev->irq_ends |= (mis_val & mask);
1703 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1704 "irq_ends:0x%x mask:0x%x\n",
1705 vdev->irq_ends, mask);
1706 if (vdev->irq_ends != mask)
1707 return;
1708 vdev->irq_ends = 0;
1709 rkispp_module_work_event(dev, NULL, NULL,
1710 ISPP_MODULE_NR, true);
1711
1712 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1713 stream = &vdev->stream[i];
1714 if (stream->streaming)
1715 stream->is_upd = true;
1716 }
1717 }
1718
/* v10 stream-ops vtable wired into the stream vdev by
 * rkispp_stream_init_ops_v10(); the generic stream layer calls back
 * through these for module config, buffer handling and IRQ follow-up.
 */
static struct rkispp_stream_ops rkispp_stream_ops = {
	.config_modules = config_modules,
	.destroy_buf = rkispp_destroy_buf,
	.fec_work_event = fec_work_event,
	.start_isp = start_isp,
	.check_to_force_update = check_to_force_update,
	.update_mi = update_mi,
	.rkispp_frame_done_early = rkispp_frame_done_early,
	.rkispp_module_work_event = rkispp_module_work_event,
};
1729
rkispp_stream_init_ops_v10(struct rkispp_stream_vdev * stream_vdev)1730 void rkispp_stream_init_ops_v10(struct rkispp_stream_vdev *stream_vdev)
1731 {
1732 stream_vdev->stream_ops = &rkispp_stream_ops;
1733 }
1734