1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/clk.h>
5 #include <linux/delay.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/slab.h>
8 #include <media/v4l2-common.h>
9 #include <media/v4l2-event.h>
10 #include <media/v4l2-fh.h>
11 #include <media/v4l2-ioctl.h>
12 #include <media/v4l2-mc.h>
13 #include <media/v4l2-subdev.h>
14 #include <media/videobuf2-dma-contig.h>
15 #include <media/videobuf2-dma-sg.h>
16 #include <linux/rkisp1-config.h>
17 #include <uapi/linux/rk-video-format.h>
18
19 #include "dev.h"
20 #include "regs.h"
21
22 #define STREAM_IN_REQ_BUFS_MIN 1
23 #define STREAM_OUT_REQ_BUFS_MIN 0
24
25 /* memory align for mpp */
26 #define RK_MPP_ALIGN 4096
27
28 static const struct capture_fmt input_fmts[] = {
29 {
30 .fourcc = V4L2_PIX_FMT_YUYV,
31 .bpp = { 16 },
32 .cplanes = 1,
33 .mplanes = 1,
34 .wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
35 }, {
36 .fourcc = V4L2_PIX_FMT_UYVY,
37 .bpp = { 16 },
38 .cplanes = 1,
39 .mplanes = 1,
40 .wr_fmt = FMT_YUYV | FMT_YUV422,
41 }, {
42 .fourcc = V4L2_PIX_FMT_NV16,
43 .bpp = { 8, 16 },
44 .cplanes = 2,
45 .mplanes = 1,
46 .wr_fmt = FMT_YUV422,
47 }, {
48 .fourcc = V4L2_PIX_FMT_NV12,
49 .bpp = { 8, 16 },
50 .cplanes = 2,
51 .mplanes = 1,
52 .wr_fmt = FMT_YUV420,
53 }
54 };
55
56 static const struct capture_fmt mb_fmts[] = {
57 {
58 .fourcc = V4L2_PIX_FMT_YUYV,
59 .bpp = { 16 },
60 .cplanes = 1,
61 .mplanes = 1,
62 .wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
63 }, {
64 .fourcc = V4L2_PIX_FMT_UYVY,
65 .bpp = { 16 },
66 .cplanes = 1,
67 .mplanes = 1,
68 .wr_fmt = FMT_YUYV | FMT_YUV422,
69 }, {
70 .fourcc = V4L2_PIX_FMT_NV16,
71 .bpp = { 8, 16 },
72 .cplanes = 2,
73 .mplanes = 1,
74 .wr_fmt = FMT_YUV422,
75 }, {
76 .fourcc = V4L2_PIX_FMT_NV12,
77 .bpp = { 8, 16 },
78 .cplanes = 2,
79 .mplanes = 1,
80 .wr_fmt = FMT_YUV420,
81 }, {
82 .fourcc = V4L2_PIX_FMT_FBC2,
83 .bpp = { 8, 16 },
84 .cplanes = 2,
85 .mplanes = 1,
86 .wr_fmt = FMT_YUV422 | FMT_FBC,
87 }, {
88 .fourcc = V4L2_PIX_FMT_FBC0,
89 .bpp = { 8, 16 },
90 .cplanes = 2,
91 .mplanes = 1,
92 .wr_fmt = FMT_YUV420 | FMT_FBC,
93 }
94 };
95
96 static const struct capture_fmt scl_fmts[] = {
97 {
98 .fourcc = V4L2_PIX_FMT_NV16,
99 .bpp = { 8, 16 },
100 .cplanes = 2,
101 .mplanes = 1,
102 .wr_fmt = FMT_YUV422,
103 }, {
104 .fourcc = V4L2_PIX_FMT_NV12,
105 .bpp = { 8, 16 },
106 .cplanes = 2,
107 .mplanes = 1,
108 .wr_fmt = FMT_YUV420,
109 }, {
110 .fourcc = V4L2_PIX_FMT_GREY,
111 .bpp = { 8 },
112 .cplanes = 1,
113 .mplanes = 1,
114 .wr_fmt = FMT_YUV422,
115 }, {
116 .fourcc = V4L2_PIX_FMT_YUYV,
117 .bpp = { 16 },
118 .cplanes = 1,
119 .mplanes = 1,
120 .wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
121 }, {
122 .fourcc = V4L2_PIX_FMT_UYVY,
123 .bpp = { 16 },
124 .cplanes = 1,
125 .mplanes = 1,
126 .wr_fmt = FMT_YUYV | FMT_YUV422,
127 }
128 };
129
130 static struct stream_config input_config = {
131 .fmts = input_fmts,
132 .fmt_size = ARRAY_SIZE(input_fmts),
133 };
134
135 static struct stream_config mb_config = {
136 .fmts = mb_fmts,
137 .fmt_size = ARRAY_SIZE(mb_fmts),
138 };
139
140 static struct stream_config scl0_config = {
141 .fmts = scl_fmts,
142 .fmt_size = ARRAY_SIZE(scl_fmts),
143 .frame_end_id = SCL0_INT,
144 .reg = {
145 .ctrl = RKISPP_SCL0_CTRL,
146 .factor = RKISPP_SCL0_FACTOR,
147 .cur_y_base = RKISPP_SCL0_CUR_Y_BASE,
148 .cur_uv_base = RKISPP_SCL0_CUR_UV_BASE,
149 .cur_vir_stride = RKISPP_SCL0_CUR_VIR_STRIDE,
150 .cur_y_base_shd = RKISPP_SCL0_CUR_Y_BASE_SHD,
151 .cur_uv_base_shd = RKISPP_SCL0_CUR_UV_BASE_SHD,
152 },
153 };
154
155 static struct stream_config scl1_config = {
156 .fmts = scl_fmts,
157 .fmt_size = ARRAY_SIZE(scl_fmts),
158 .frame_end_id = SCL1_INT,
159 .reg = {
160 .ctrl = RKISPP_SCL1_CTRL,
161 .factor = RKISPP_SCL1_FACTOR,
162 .cur_y_base = RKISPP_SCL1_CUR_Y_BASE,
163 .cur_uv_base = RKISPP_SCL1_CUR_UV_BASE,
164 .cur_vir_stride = RKISPP_SCL1_CUR_VIR_STRIDE,
165 .cur_y_base_shd = RKISPP_SCL1_CUR_Y_BASE_SHD,
166 .cur_uv_base_shd = RKISPP_SCL1_CUR_UV_BASE_SHD,
167 },
168 };
169
170 static struct stream_config scl2_config = {
171 .fmts = scl_fmts,
172 .fmt_size = ARRAY_SIZE(scl_fmts),
173 .frame_end_id = SCL2_INT,
174 .reg = {
175 .ctrl = RKISPP_SCL2_CTRL,
176 .factor = RKISPP_SCL2_FACTOR,
177 .cur_y_base = RKISPP_SCL2_CUR_Y_BASE,
178 .cur_uv_base = RKISPP_SCL2_CUR_UV_BASE,
179 .cur_vir_stride = RKISPP_SCL2_CUR_VIR_STRIDE,
180 .cur_y_base_shd = RKISPP_SCL2_CUR_Y_BASE_SHD,
181 .cur_uv_base_shd = RKISPP_SCL2_CUR_UV_BASE_SHD,
182 },
183 };
184
185 static void set_vir_stride(struct rkispp_stream *stream, u32 val)
186 {
187 rkispp_write(stream->isppdev, stream->config->reg.cur_vir_stride, val);
188 }
189
190 static void set_scl_factor(struct rkispp_stream *stream, u32 val)
191 {
192 rkispp_write(stream->isppdev, stream->config->reg.factor, val);
193 }
194
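/* map a fourcc to its chroma subsampling factors:
 * GREY 1x1, NV16/NV61/FBC2 2x1 (4:2:2), NV12/NV21/FBC0 2x2 (4:2:0)
 */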
195 static int fcc_xysubs(u32 fcc, u32 *xsubs, u32 *ysubs)
196 {
197 switch (fcc) {
198 case V4L2_PIX_FMT_GREY:
199 *xsubs = 1;
200 *ysubs = 1;
201 break;
202 case V4L2_PIX_FMT_NV16:
203 case V4L2_PIX_FMT_NV61:
204 case V4L2_PIX_FMT_FBC2:
205 *xsubs = 2;
206 *ysubs = 1;
207 break;
208 case V4L2_PIX_FMT_NV12:
209 case V4L2_PIX_FMT_NV21:
210 case V4L2_PIX_FMT_FBC0:
211 *xsubs = 2;
212 *ysubs = 2;
213 break;
214 default:
215 return -EINVAL;
216 }
217 return 0;
218 }
219
220 static const
221 struct capture_fmt *find_fmt(struct rkispp_stream *stream,
222 const u32 pixelfmt)
223 {
224 const struct capture_fmt *fmt;
225 unsigned int i;
226
227 for (i = 0; i < stream->config->fmt_size; i++) {
228 fmt = &stream->config->fmts[i];
229 if (fmt->fourcc == pixelfmt)
230 return fmt;
231 }
232 return NULL;
233 }
234
235 static void vir_cpy_image(struct work_struct *work)
236 {
237 struct rkispp_vir_cpy *cpy =
238 container_of(work, struct rkispp_vir_cpy, work);
239 struct rkispp_stream *vir = cpy->stream;
240 struct rkispp_buffer *src_buf = NULL;
241 unsigned long lock_flags = 0;
242 u32 i;
243
244 v4l2_dbg(1, rkispp_debug, &vir->isppdev->v4l2_dev,
245 "%s enter\n", __func__);
246
247 vir->streaming = true;
248 spin_lock_irqsave(&vir->vbq_lock, lock_flags);
249 if (!list_empty(&cpy->queue)) {
250 src_buf = list_first_entry(&cpy->queue,
251 struct rkispp_buffer, queue);
252 list_del(&src_buf->queue);
253 }
254 spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
255
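/* drain frames queued by the connected output stream and copy them
 * into the iqtool (virtual) stream's buffers until streaming stops
 */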
256 while (src_buf || vir->streaming) {
257 if (vir->stopping || !vir->streaming)
258 goto end;
259 if (!src_buf)
260 wait_for_completion(&cpy->cmpl);
261
262 vir->is_end = false;
263 spin_lock_irqsave(&vir->vbq_lock, lock_flags);
264 if (!src_buf && !list_empty(&cpy->queue)) {
265 src_buf = list_first_entry(&cpy->queue,
266 struct rkispp_buffer, queue);
267 list_del(&src_buf->queue);
268 }
269 if (src_buf && !vir->curr_buf && !list_empty(&vir->buf_queue)) {
270 vir->curr_buf = list_first_entry(&vir->buf_queue,
271 struct rkispp_buffer, queue);
272 list_del(&vir->curr_buf->queue);
273 }
274 spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
275 if (!vir->curr_buf || !src_buf)
276 goto end;
277 for (i = 0; i < vir->out_cap_fmt.mplanes; i++) {
278 u32 payload_size = vir->out_fmt.plane_fmt[i].sizeimage;
279 void *src = vb2_plane_vaddr(&src_buf->vb.vb2_buf, i);
280 void *dst = vb2_plane_vaddr(&vir->curr_buf->vb.vb2_buf, i);
281
282 if (!src || !dst)
283 break;
284 vb2_set_plane_payload(&vir->curr_buf->vb.vb2_buf, i, payload_size);
285 memcpy(dst, src, payload_size);
286 }
287 vir->curr_buf->vb.sequence = src_buf->vb.sequence;
288 vir->curr_buf->vb.vb2_buf.timestamp = src_buf->vb.vb2_buf.timestamp;
289 vb2_buffer_done(&vir->curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
290 vir->curr_buf = NULL;
291 end:
292 if (src_buf)
293 vb2_buffer_done(&src_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
294 src_buf = NULL;
295 spin_lock_irqsave(&vir->vbq_lock, lock_flags);
296 if (!list_empty(&cpy->queue)) {
297 src_buf = list_first_entry(&cpy->queue,
298 struct rkispp_buffer, queue);
299 list_del(&src_buf->queue);
300 } else if (vir->stopping) {
301 vir->streaming = false;
302 }
303 spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
304 }
305
306 vir->is_end = true;
307 if (vir->stopping) {
308 vir->stopping = false;
309 vir->streaming = false;
310 wake_up(&vir->done);
311 }
312 v4l2_dbg(1, rkispp_debug, &vir->isppdev->v4l2_dev,
313 "%s exit\n", __func__);
314 }
315
316 static void irq_work(struct work_struct *work)
317 {
318 struct rkispp_device *dev = container_of(work, struct rkispp_device, irq_work);
319
320 rkispp_set_clk_rate(dev->hw_dev->clks[0], dev->hw_dev->core_clk_max);
321 dev->stream_vdev.stream_ops->check_to_force_update(dev, dev->mis_val);
322 dev->hw_dev->is_first = false;
323 }
324
325 void get_stream_buf(struct rkispp_stream *stream)
326 {
327 unsigned long lock_flags = 0;
328
329 spin_lock_irqsave(&stream->vbq_lock, lock_flags);
330 if (!list_empty(&stream->buf_queue) && !stream->curr_buf) {
331 stream->curr_buf =
332 list_first_entry(&stream->buf_queue,
333 struct rkispp_buffer, queue);
334 list_del(&stream->curr_buf->queue);
335 }
336 spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
337 }
338
339 int rkispp_frame_end(struct rkispp_stream *stream, u32 state)
340 {
341 struct rkispp_device *dev = stream->isppdev;
342 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
343 struct capture_fmt *fmt = &stream->out_cap_fmt;
344 struct rkisp_ispp_reg *reg_buf = NULL;
345 unsigned long lock_flags = 0;
346 int i = 0;
347
348 if (state == FRAME_IRQ && dev->stream_vdev.is_done_early)
349 return 0;
350
351 if (stream->curr_buf) {
352 struct rkispp_stream *vir = &dev->stream_vdev.stream[STREAM_VIR];
353 u64 ns = dev->ispp_sdev.frame_timestamp;
354
355 if (!ns)
356 ns = ktime_get_ns();
357
358 for (i = 0; i < fmt->mplanes; i++) {
359 u32 payload_size =
360 stream->out_fmt.plane_fmt[i].sizeimage;
361 vb2_set_plane_payload(&stream->curr_buf->vb.vb2_buf, i,
362 payload_size);
363 }
364 stream->curr_buf->vb.sequence = dev->ispp_sdev.frm_sync_seq;
365 stream->curr_buf->vb.vb2_buf.timestamp = ns;
366
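/* registers delivered with the stream: copy the matching ISP reg dump
 * into the extra plane (only for FBC or full-frame YUV420 output)
 */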
367 if (stream->is_reg_withstream &&
368 (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420)) {
369 void *addr = vb2_plane_vaddr(&stream->curr_buf->vb.vb2_buf, i);
370
371 rkispp_find_regbuf_by_id(dev, &reg_buf, dev->dev_id,
372 stream->curr_buf->vb.sequence);
373 if (reg_buf) {
374 u32 cpy_size = offsetof(struct rkisp_ispp_reg, reg);
375
376 cpy_size += reg_buf->reg_size;
377 memcpy(addr, reg_buf, cpy_size);
378
379 rkispp_release_regbuf(dev, reg_buf);
380 vb2_set_plane_payload(&stream->curr_buf->vb.vb2_buf, 1, cpy_size);
381 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
382 "stream(0x%x) write reg buf to last plane\n",
383 stream->id);
384 } else {
385 v4l2_err(&dev->v4l2_dev,
386 "%s can not find reg buf: dev_id %d, sequence %d\n",
387 __func__, dev->dev_id, stream->curr_buf->vb.sequence);
388 }
389 }
390
391 if (vir->streaming && vir->conn_id == stream->id) {
392 spin_lock_irqsave(&vir->vbq_lock, lock_flags);
393 if (vir->streaming)
394 list_add_tail(&stream->curr_buf->queue,
395 &dev->stream_vdev.vir_cpy.queue);
396 spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
397 if (!completion_done(&dev->stream_vdev.vir_cpy.cmpl))
398 complete(&dev->stream_vdev.vir_cpy.cmpl);
399 if (!vir->streaming)
400 vb2_buffer_done(&stream->curr_buf->vb.vb2_buf,
401 VB2_BUF_STATE_DONE);
402 } else {
403 vb2_buffer_done(&stream->curr_buf->vb.vb2_buf,
404 VB2_BUF_STATE_DONE);
405 }
406 ns = ktime_get_ns();
407 stream->dbg.interval = ns - stream->dbg.timestamp;
408 stream->dbg.timestamp = ns;
409 stream->dbg.delay = ns - stream->curr_buf->vb.vb2_buf.timestamp;
410 stream->dbg.id = stream->curr_buf->vb.sequence;
411
412 stream->curr_buf = NULL;
413 } else {
414 u32 frame_id = dev->ispp_sdev.frm_sync_seq;
415
416 if (stream->is_cfg) {
417 stream->dbg.frameloss++;
418 v4l2_dbg(0, rkispp_debug, &dev->v4l2_dev,
419 "stream:%d no buf, lost frame:%d\n",
420 stream->id, frame_id);
421 }
422
423 if (stream->is_reg_withstream &&
424 (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420)) {
425 rkispp_find_regbuf_by_id(dev, &reg_buf, dev->dev_id, frame_id);
426 if (reg_buf) {
427 rkispp_release_regbuf(dev, reg_buf);
428 v4l2_info(&dev->v4l2_dev,
429 "%s: current frame use dummy buffer(dev_id %d, sequence %d)\n",
430 __func__, dev->dev_id, frame_id);
431 }
432 }
433 }
434
435 get_stream_buf(stream);
436 vdev->stream_ops->update_mi(stream);
437 return 0;
438 }
439
440 void *get_pool_buf(struct rkispp_device *dev,
441 struct rkisp_ispp_buf *dbufs)
442 {
443 int i;
444
445 for (i = 0; i < RKISPP_BUF_POOL_MAX; i++)
446 if (dev->hw_dev->pool[i].dbufs == dbufs)
447 return &dev->hw_dev->pool[i];
448
449 return NULL;
450 }
451
452 void *dbuf_to_dummy(struct dma_buf *dbuf,
453 struct rkispp_dummy_buffer *pool,
454 int num)
455 {
456 int i;
457
458 for (i = 0; i < num; i++) {
459 if (pool->dbuf == dbuf)
460 return pool;
461 pool++;
462 }
463
464 return NULL;
465 }
466
467 void *get_list_buf(struct list_head *list, bool is_isp_ispp)
468 {
469 void *buf = NULL;
470
471 if (!list_empty(list)) {
472 if (is_isp_ispp) {
473 buf = list_first_entry(list,
474 struct rkisp_ispp_buf, list);
475 list_del(&((struct rkisp_ispp_buf *)buf)->list);
476 } else {
477 buf = list_first_entry(list,
478 struct rkispp_dummy_buffer, list);
479 list_del(&((struct rkispp_dummy_buffer *)buf)->list);
480 }
481 }
482 return buf;
483 }
484
485 void rkispp_start_3a_run(struct rkispp_device *dev)
486 {
487 struct rkispp_params_vdev *params_vdev = &dev->params_vdev;
488 struct video_device *vdev = &params_vdev->vnode.vdev;
489 struct v4l2_event ev = {
490 .type = CIFISP_V4L2_EVENT_STREAM_START,
491 };
492 int ret;
493
494 if (!params_vdev->is_subs_evt)
495 return;
496
497 v4l2_event_queue(vdev, &ev);
498 ret = wait_event_timeout(dev->sync_onoff,
499 params_vdev->streamon && !params_vdev->first_params,
500 msecs_to_jiffies(1000));
501 if (!ret)
502 v4l2_warn(&dev->v4l2_dev,
503 "waiting on params stream on event timeout\n");
504 else
505 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
506 "Waiting for 3A on use %d ms\n", 1000 - ret);
507 }
508
509 static void rkispp_stop_3a_run(struct rkispp_device *dev)
510 {
511 struct rkispp_params_vdev *params_vdev = &dev->params_vdev;
512 struct video_device *vdev = &params_vdev->vnode.vdev;
513 struct v4l2_event ev = {
514 .type = CIFISP_V4L2_EVENT_STREAM_STOP,
515 };
516 int ret;
517
518 if (!params_vdev->is_subs_evt)
519 return;
520
521 v4l2_event_queue(vdev, &ev);
522 ret = wait_event_timeout(dev->sync_onoff, !params_vdev->streamon,
523 msecs_to_jiffies(1000));
524 if (!ret)
525 v4l2_warn(&dev->v4l2_dev,
526 "waiting on params stream off event timeout\n");
527 else
528 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
529 "Waiting for 3A off use %d ms\n", 1000 - ret);
530 }
531
532 static int start_ii(struct rkispp_stream *stream)
533 {
534 struct rkispp_device *dev = stream->isppdev;
535 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
536 unsigned long lock_flags = 0;
537 struct rkispp_buffer *buf;
538 int i;
539
540 v4l2_subdev_call(&dev->ispp_sdev.sd, video, s_stream, true);
541 spin_lock_irqsave(&stream->vbq_lock, lock_flags);
542 while (!list_empty(&stream->buf_queue)) {
543 buf = list_first_entry(&stream->buf_queue, struct rkispp_buffer, queue);
544 list_del(&buf->queue);
545 i = buf->vb.vb2_buf.index;
546 vdev->input[i].priv = buf;
547 vdev->input[i].index = dev->dev_id;
548 vdev->input[i].frame_timestamp = buf->vb.vb2_buf.timestamp;
549 vdev->input[i].frame_id = ++dev->ispp_sdev.frm_sync_seq;
550 rkispp_event_handle(dev, CMD_QUEUE_DMABUF, &vdev->input[i]);
551 }
552 stream->streaming = true;
553 spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
554 return 0;
555 }
556
557 static int config_ii(struct rkispp_stream *stream)
558 {
559 struct rkispp_stream_vdev *stream_vdev = &stream->isppdev->stream_vdev;
560
561 stream->is_cfg = true;
562 rkispp_start_3a_run(stream->isppdev);
563 return stream_vdev->stream_ops->config_modules(stream->isppdev);
564 }
565
566 static int is_stopped_ii(struct rkispp_stream *stream)
567 {
568 stream->streaming = false;
569 return true;
570 }
571
572 void secure_config_mb(struct rkispp_stream *stream)
573 {
574 struct rkispp_device *dev = stream->isppdev;
575 u32 limit_range, mult = 1;
576
577 /* enable dma immediately, config in idle state */
578 switch (stream->last_module) {
579 case ISPP_MODULE_TNR:
580 rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_WR_MASK,
581 SW_TNR_1ST_FRM | stream->out_cap_fmt.wr_fmt << 4);
582 break;
583 case ISPP_MODULE_NR:
584 case ISPP_MODULE_SHP:
585 limit_range = (stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
586 0 : SW_SHP_WR_YUV_LIMIT;
587 rkispp_set_bits(dev, RKISPP_SHARP_CTRL,
588 SW_SHP_WR_YUV_LIMIT | SW_SHP_WR_FORMAT_MASK,
589 limit_range | stream->out_cap_fmt.wr_fmt);
590 rkispp_clear_bits(dev, RKISPP_SHARP_CORE_CTRL, SW_SHP_DMA_DIS);
591 break;
592 case ISPP_MODULE_FEC:
593 limit_range = (stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
594 0 : SW_FEC_WR_YUV_LIMIT;
595 rkispp_set_bits(dev, RKISPP_FEC_CTRL, SW_FEC_WR_YUV_LIMIT | FMT_WR_MASK,
596 limit_range | stream->out_cap_fmt.wr_fmt << 4);
597 rkispp_write(dev, RKISPP_FEC_DST_SIZE,
598 stream->out_fmt.height << 16 | stream->out_fmt.width);
599 rkispp_clear_bits(dev, RKISPP_FEC_CORE_CTRL, SW_FEC2DDR_DIS);
600 break;
601 default:
602 break;
603 }
604
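/* vir stride is in 4-byte units; packed YUYV needs 2 bytes per pixel
 * (mult = 2) while FBC needs no line stride at all (mult = 0)
 */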
605 if (stream->out_cap_fmt.wr_fmt & FMT_YUYV)
606 mult = 2;
607 else if (stream->out_cap_fmt.wr_fmt & FMT_FBC)
608 mult = 0;
609 set_vir_stride(stream, ALIGN(stream->out_fmt.width * mult, 16) >> 2);
610
611 /* config first buf */
612 rkispp_frame_end(stream, FRAME_INIT);
613
614 stream->is_cfg = true;
615 }
616
617 static int config_mb(struct rkispp_stream *stream)
618 {
619 struct rkispp_device *dev = stream->isppdev;
620 u32 i;
621
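/* walk from FEC down to TNR to find the last enabled module;
 * its write-back DMA is what feeds this stream
 */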
622 for (i = ISPP_MODULE_FEC; i > 0; i = i >> 1) {
623 if (dev->stream_vdev.module_ens & i)
624 break;
625 }
626 if (!i)
627 return -EINVAL;
628
629 stream->last_module = i;
630 switch (i) {
631 case ISPP_MODULE_TNR:
632 stream->config->frame_end_id = TNR_INT;
633 stream->config->reg.cur_y_base = RKISPP_TNR_WR_Y_BASE;
634 stream->config->reg.cur_uv_base = RKISPP_TNR_WR_UV_BASE;
635 stream->config->reg.cur_vir_stride = RKISPP_TNR_WR_VIR_STRIDE;
636 stream->config->reg.cur_y_base_shd = RKISPP_TNR_WR_Y_BASE_SHD;
637 stream->config->reg.cur_uv_base_shd = RKISPP_TNR_WR_UV_BASE_SHD;
638 break;
639 case ISPP_MODULE_NR:
640 case ISPP_MODULE_SHP:
641 stream->config->frame_end_id = SHP_INT;
642 stream->config->reg.cur_y_base = RKISPP_SHARP_WR_Y_BASE;
643 stream->config->reg.cur_uv_base = RKISPP_SHARP_WR_UV_BASE;
644 stream->config->reg.cur_vir_stride = RKISPP_SHARP_WR_VIR_STRIDE;
645 stream->config->reg.cur_y_base_shd = RKISPP_SHARP_WR_Y_BASE_SHD;
646 stream->config->reg.cur_uv_base_shd = RKISPP_SHARP_WR_UV_BASE_SHD;
647 break;
648 default:
649 stream->config->frame_end_id = FEC_INT;
650 stream->config->reg.cur_y_base = RKISPP_FEC_WR_Y_BASE;
651 stream->config->reg.cur_uv_base = RKISPP_FEC_WR_UV_BASE;
652 stream->config->reg.cur_vir_stride = RKISPP_FEC_WR_VIR_STRIDE;
653 stream->config->reg.cur_y_base_shd = RKISPP_FEC_WR_Y_BASE_SHD;
654 stream->config->reg.cur_uv_base_shd = RKISPP_FEC_WR_UV_BASE_SHD;
655 }
656
657 if (dev->ispp_sdev.state == ISPP_STOP)
658 secure_config_mb(stream);
659 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
660 "%s last module:%d\n", __func__, i);
661 return 0;
662 }
663
664 static int is_stopped_mb(struct rkispp_stream *stream)
665 {
666 struct rkispp_device *dev = stream->isppdev;
667 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
668 bool is_stopped = true;
669 u32 val;
670
671 if (vdev->module_ens & ISPP_MODULE_FEC) {
672 /* close dma write immediately */
673 rkispp_clear_bits(dev, RKISPP_FEC_CTRL, FMT_FBC << 4);
674 rkispp_set_bits(dev, RKISPP_FEC_CORE_CTRL,
675 0, SW_FEC2DDR_DIS);
676 } else if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
677 val = dev->hw_dev->dummy_buf.dma_addr;
678 rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
679 rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
680 if (dev->inp == INP_ISP)
681 rkispp_set_bits(dev, RKISPP_SHARP_CTRL, SW_SHP_WR_FORMAT_MASK, FMT_FBC);
682 }
683
684 /* wait for the last frame to finish */
685 if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
686 val = readl(dev->hw_dev->base_addr + RKISPP_CTRL_SYS_STATUS);
687 is_stopped = (val & 0x8f) ? false : true;
688 }
689
690 return is_stopped;
691 }
692
693 static int limit_check_mb(struct rkispp_stream *stream,
694 struct v4l2_pix_format_mplane *try_fmt)
695 {
696 struct rkispp_device *dev = stream->isppdev;
697 struct rkispp_subdev *sdev = &dev->ispp_sdev;
698 u32 *w = try_fmt ? &try_fmt->width : &stream->out_fmt.width;
699 u32 *h = try_fmt ? &try_fmt->height : &stream->out_fmt.height;
700
701 if (*w != sdev->out_fmt.width || *h != sdev->out_fmt.height) {
702 v4l2_err(&dev->v4l2_dev,
703 "output:%dx%d should euqal to input:%dx%d\n",
704 *w, *h, sdev->out_fmt.width, sdev->out_fmt.height);
705 if (!try_fmt) {
706 *w = 0;
707 *h = 0;
708 }
709 return -EINVAL;
710 }
711
712 return 0;
713 }
714
715 static int config_scl(struct rkispp_stream *stream)
716 {
717 struct rkispp_device *dev = stream->isppdev;
718 const struct capture_fmt *fmt = &stream->out_cap_fmt;
719 u32 in_width = dev->ispp_sdev.out_fmt.width;
720 u32 in_height = dev->ispp_sdev.out_fmt.height;
721 u32 hy_fac = (stream->out_fmt.width - 1) * 8192 /
722 (in_width - 1) + 1;
723 u32 vy_fac = (stream->out_fmt.height - 1) * 8192 /
724 (in_height - 1) + 1;
725 u32 val = SW_SCL_ENABLE, mult = 1;
726 u32 mask = SW_SCL_WR_YUV_LIMIT | SW_SCL_WR_YUYV_YCSWAP |
727 SW_SCL_WR_YUYV_FORMAT | SW_SCL_WR_YUV_FORMAT |
728 SW_SCL_WR_UV_DIS | SW_SCL_BYPASS;
729
730 /* config first buf */
731 rkispp_frame_end(stream, FRAME_INIT);
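/* scale factors are 8192-based fixed point: (out - 1) * 8192 / (in - 1) + 1,
 * so 8193 for both directions means 1:1 and the scaler can be bypassed
 */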
732 if (hy_fac == 8193 && vy_fac == 8193)
733 val |= SW_SCL_BYPASS;
734 if (fmt->wr_fmt & FMT_YUYV)
735 mult = 2;
736 set_vir_stride(stream, ALIGN(stream->out_fmt.width * mult, 16) >> 2);
737 set_scl_factor(stream, vy_fac << 16 | hy_fac);
738 val |= fmt->wr_fmt << 3 |
739 ((fmt->fourcc != V4L2_PIX_FMT_GREY) ? 0 : SW_SCL_WR_UV_DIS) |
740 ((stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
741 0 : SW_SCL_WR_YUV_LIMIT);
742 rkispp_set_bits(dev, stream->config->reg.ctrl, mask, val);
743 stream->is_cfg = true;
744
745 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
746 "scl%d ctrl:0x%x stride:0x%x factor:0x%x\n",
747 stream->id - STREAM_S0,
748 rkispp_read(dev, stream->config->reg.ctrl),
749 rkispp_read(dev, stream->config->reg.cur_vir_stride),
750 rkispp_read(dev, stream->config->reg.factor));
751 return 0;
752 }
753
754 static void stop_scl(struct rkispp_stream *stream)
755 {
756 struct rkispp_device *dev = stream->isppdev;
757
758 rkispp_clear_bits(dev, stream->config->reg.ctrl, SW_SCL_ENABLE);
759 }
760
761 static int is_stopped_scl(struct rkispp_stream *stream)
762 {
763 struct rkispp_device *dev = stream->isppdev;
764 u32 scl_en, other_en = 0, val = SW_SCL_ENABLE;
765 bool is_stopped;
766
767 if (dev->hw_dev->is_single)
768 val = SW_SCL_ENABLE_SHD;
769 scl_en = rkispp_read(dev, stream->config->reg.ctrl) & val;
770 if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
771 val = readl(dev->hw_dev->base_addr + RKISPP_CTRL_SYS_STATUS);
772 other_en = val & 0x8f;
773 }
774 is_stopped = (scl_en | other_en) ? false : true;
775 return is_stopped;
776 }
777
778 static int limit_check_scl(struct rkispp_stream *stream,
779 struct v4l2_pix_format_mplane *try_fmt)
780 {
781 struct rkispp_device *dev = stream->isppdev;
782 struct rkispp_subdev *sdev = &dev->ispp_sdev;
783 u32 max_width = 1280, max_ratio = 8, min_ratio = 2;
784 u32 *w = try_fmt ? &try_fmt->width : &stream->out_fmt.width;
785 u32 *h = try_fmt ? &try_fmt->height : &stream->out_fmt.height;
786 u32 forcc = try_fmt ? try_fmt->pixelformat : stream->out_fmt.pixelformat;
787 int ret = 0;
788
789 /* bypass scale */
790 if (*w == sdev->out_fmt.width && *h == sdev->out_fmt.height)
791 return ret;
792
793 if (stream->id == STREAM_S0) {
794 if (*h == sdev->out_fmt.height || (forcc != V4L2_PIX_FMT_NV12))
795 max_width = 3264;
796 else
797 max_width = 2080;
798 min_ratio = 1;
799 }
800
801 if (*w > max_width ||
802 *w * max_ratio < sdev->out_fmt.width ||
803 *h * max_ratio < sdev->out_fmt.height ||
804 *w * min_ratio > sdev->out_fmt.width ||
805 *h * min_ratio > sdev->out_fmt.height) {
806 v4l2_err(&dev->v4l2_dev,
807 "scale%d:%dx%d out of range:\n"
808 "\t[width max:%d ratio max:%d min:%d]\n",
809 stream->id - STREAM_S0, *w, *h,
810 max_width, max_ratio, min_ratio);
811 if (!try_fmt) {
812 *w = 0;
813 *h = 0;
814 }
815 ret = -EINVAL;
816 }
817
818 return ret;
819 }
820
821 static struct streams_ops input_stream_ops = {
822 .config = config_ii,
823 .start = start_ii,
824 .is_stopped = is_stopped_ii,
825 };
826
827 static struct streams_ops mb_stream_ops = {
828 .config = config_mb,
829 .is_stopped = is_stopped_mb,
830 .limit_check = limit_check_mb,
831 };
832
833 static struct streams_ops scal_stream_ops = {
834 .config = config_scl,
835 .stop = stop_scl,
836 .is_stopped = is_stopped_scl,
837 .limit_check = limit_check_scl,
838 };
839
840 /***************************** vb2 operations*******************************/
841
842 static int rkispp_queue_setup(struct vb2_queue *queue,
843 unsigned int *num_buffers,
844 unsigned int *num_planes,
845 unsigned int sizes[],
846 struct device *alloc_ctxs[])
847 {
848 struct rkispp_stream *stream = queue->drv_priv;
849 struct rkispp_device *dev = stream->isppdev;
850 const struct v4l2_pix_format_mplane *pixm = NULL;
851 const struct capture_fmt *cap_fmt = NULL;
852 u32 i;
853
854 pixm = &stream->out_fmt;
855 if (!pixm->width || !pixm->height)
856 return -EINVAL;
857 cap_fmt = &stream->out_cap_fmt;
858 *num_planes = cap_fmt->mplanes;
859
860 for (i = 0; i < cap_fmt->mplanes; i++) {
861 const struct v4l2_plane_pix_format *plane_fmt;
862
863 plane_fmt = &pixm->plane_fmt[i];
864 /* align height to 16 when allocating memory
865 * so that the Rockchip encoder can use the DMA buffer directly
866 */
867 sizes[i] = (stream->type == STREAM_OUTPUT &&
868 cap_fmt->wr_fmt != FMT_FBC) ?
869 plane_fmt->sizeimage / pixm->height *
870 ALIGN(pixm->height, 16) :
871 plane_fmt->sizeimage;
872 }
873
874 if (stream->is_reg_withstream &&
875 (cap_fmt->wr_fmt & FMT_FBC || cap_fmt->wr_fmt == FMT_YUV420)) {
876 (*num_planes)++;
877 sizes[1] = sizeof(struct rkisp_ispp_reg);
878 }
879
880 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
881 "%s stream:%d count %d, size %d\n",
882 v4l2_type_names[queue->type],
883 stream->id, *num_buffers, sizes[0]);
884
885 return 0;
886 }
887
888 static void rkispp_buf_queue(struct vb2_buffer *vb)
889 {
890 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
891 struct rkispp_buffer *isppbuf = to_rkispp_buffer(vbuf);
892 struct vb2_queue *queue = vb->vb2_queue;
893 struct rkispp_stream *stream = queue->drv_priv;
894 struct rkispp_device *dev = stream->isppdev;
895 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
896 struct v4l2_pix_format_mplane *pixm = &stream->out_fmt;
897 struct capture_fmt *cap_fmt = &stream->out_cap_fmt;
898 unsigned long lock_flags = 0;
899 u32 height, size, offset;
900 struct sg_table *sgt;
901 int i;
902
903 memset(isppbuf->buff_addr, 0, sizeof(isppbuf->buff_addr));
904 for (i = 0; i < cap_fmt->mplanes; i++) {
905 vb2_plane_vaddr(vb, i);
906 if (stream->isppdev->hw_dev->is_dma_sg_ops) {
907 sgt = vb2_dma_sg_plane_desc(vb, i);
908 isppbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
909 } else {
910 isppbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
911 }
912 }
913 /*
914 * NOTE: plane_fmt[0].sizeimage is total size of all planes for single
915 * memory plane formats, so calculate the size explicitly.
916 */
917 if (cap_fmt->mplanes == 1) {
918 for (i = 0; i < cap_fmt->cplanes - 1; i++) {
919 /* FBC mode: calculate the payload offset */
920 height = (cap_fmt->wr_fmt & FMT_FBC) ?
921 ALIGN(pixm->height, 16) >> 4 : pixm->height;
922 size = (i == 0) ?
923 pixm->plane_fmt[i].bytesperline * height :
924 pixm->plane_fmt[i].sizeimage;
925 offset = (cap_fmt->wr_fmt & FMT_FBC) ?
926 ALIGN(size, RK_MPP_ALIGN) : size;
927 if (cap_fmt->wr_fmt & FMT_FBC && dev->ispp_ver == ISPP_V20)
928 rkispp_write(dev, RKISPP_FEC_FBCE_HEAD_OFFSET,
929 offset | SW_OFFSET_ENABLE);
930
931 isppbuf->buff_addr[i + 1] =
932 isppbuf->buff_addr[i] + offset;
933 }
934 }
935
936 v4l2_dbg(2, rkispp_debug, &stream->isppdev->v4l2_dev,
937 "%s stream:%d buf:0x%x\n", __func__,
938 stream->id, isppbuf->buff_addr[0]);
939
940 spin_lock_irqsave(&stream->vbq_lock, lock_flags);
941 if (stream->type == STREAM_OUTPUT ||
942 (stream->id == STREAM_II && !stream->streaming)) {
943 list_add_tail(&isppbuf->queue, &stream->buf_queue);
944 } else {
945 i = vb->index;
946 vdev->input[i].priv = isppbuf;
947 vdev->input[i].index = dev->dev_id;
948 vdev->input[i].frame_timestamp = vb->timestamp;
949 vdev->input[i].frame_id = ++dev->ispp_sdev.frm_sync_seq;
950 rkispp_event_handle(dev, CMD_QUEUE_DMABUF, &vdev->input[i]);
951 }
952 spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
953 }
954
955 static void rkispp_stream_stop(struct rkispp_stream *stream)
956 {
957 struct rkispp_device *dev = stream->isppdev;
958 bool is_wait = true;
959 int ret = 0;
960
961 stream->stopping = true;
962 if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
963 v4l2_subdev_call(&dev->ispp_sdev.sd, video, s_stream, false);
964 rkispp_stop_3a_run(dev);
965 if (dev->stream_vdev.fec.is_end &&
966 (dev->dev_id != dev->hw_dev->cur_dev_id || dev->hw_dev->is_idle))
967 is_wait = false;
968 }
969 if (is_wait) {
970 ret = wait_event_timeout(stream->done,
971 !stream->streaming,
972 msecs_to_jiffies(500));
973 if (!ret)
974 v4l2_warn(&dev->v4l2_dev,
975 "stream:%d stop timeout\n", stream->id);
976 }
977 if (stream->ops) {
978 /* scl stream close dma write */
979 if (stream->ops->stop)
980 stream->ops->stop(stream);
981 else if (stream->ops->is_stopped)
982 /* mb stream close dma write immediately */
983 stream->ops->is_stopped(stream);
984 }
985 stream->is_upd = false;
986 stream->streaming = false;
987 stream->stopping = false;
988 }
989
990 static void destroy_buf_queue(struct rkispp_stream *stream,
991 enum vb2_buffer_state state)
992 {
993 struct vb2_queue *queue = &stream->vnode.buf_queue;
994 unsigned long lock_flags = 0;
995 struct rkispp_buffer *buf;
996 u32 i;
997
998 spin_lock_irqsave(&stream->vbq_lock, lock_flags);
999 if (stream->curr_buf) {
1000 list_add_tail(&stream->curr_buf->queue, &stream->buf_queue);
1001 stream->curr_buf = NULL;
1002 }
1003 while (!list_empty(&stream->buf_queue)) {
1004 buf = list_first_entry(&stream->buf_queue,
1005 struct rkispp_buffer, queue);
1006 list_del(&buf->queue);
1007 vb2_buffer_done(&buf->vb.vb2_buf, state);
1008 }
1009 spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
1010
1011 for (i = 0; i < queue->num_buffers; ++i) {
1012 if (queue->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
1013 vb2_buffer_done(queue->bufs[i], VB2_BUF_STATE_ERROR);
1014 }
1015 }
1016
1017 static void rkispp_stop_streaming(struct vb2_queue *queue)
1018 {
1019 struct rkispp_stream *stream = queue->drv_priv;
1020 struct rkispp_device *dev = stream->isppdev;
1021 struct rkispp_hw_dev *hw = dev->hw_dev;
1022 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1023
1024 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1025 "%s id:%d enter\n", __func__, stream->id);
1026
1027 if (!stream->streaming)
1028 return;
1029
1030 if (stream->id == STREAM_VIR) {
1031 stream->stopping = true;
1032 wait_event_timeout(stream->done,
1033 stream->is_end,
1034 msecs_to_jiffies(500));
1035 stream->streaming = false;
1036 stream->stopping = false;
1037 destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);
1038 if (!completion_done(&dev->stream_vdev.vir_cpy.cmpl))
1039 complete(&dev->stream_vdev.vir_cpy.cmpl);
1040 return;
1041 }
1042
1043 mutex_lock(&dev->hw_dev->dev_lock);
1044 rkispp_stream_stop(stream);
1045 destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);
1046 vdev->stream_ops->destroy_buf(stream);
1047 mutex_unlock(&dev->hw_dev->dev_lock);
1048 rkispp_free_common_dummy_buf(dev);
1049 atomic_dec(&dev->stream_vdev.refcnt);
1050
1051 if (!atomic_read(&hw->refcnt) &&
1052 !atomic_read(&dev->stream_vdev.refcnt)) {
1053 rkispp_set_clk_rate(hw->clks[0], hw->core_clk_min);
1054 hw->is_idle = true;
1055 hw->is_first = true;
1056 }
1057 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1058 "%s id:%d exit\n", __func__, stream->id);
1059 }
1060
1061 static int rkispp_start_streaming(struct vb2_queue *queue,
1062 unsigned int count)
1063 {
1064 struct rkispp_stream *stream = queue->drv_priv;
1065 struct rkispp_device *dev = stream->isppdev;
1066 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1067 struct rkispp_hw_dev *hw = dev->hw_dev;
1068 int ret = -1;
1069
1070 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1071 "%s id:%d enter\n", __func__, stream->id);
1072
1073 if (stream->streaming)
1074 return -EBUSY;
1075
1076 stream->is_end = true;
1077 if (stream->id == STREAM_VIR) {
1078 struct rkispp_stream *t = &dev->stream_vdev.stream[stream->conn_id];
1079
1080 if (t->streaming) {
1081 INIT_WORK(&dev->stream_vdev.vir_cpy.work, vir_cpy_image);
1082 init_completion(&dev->stream_vdev.vir_cpy.cmpl);
1083 INIT_LIST_HEAD(&dev->stream_vdev.vir_cpy.queue);
1084 dev->stream_vdev.vir_cpy.stream = stream;
1085 schedule_work(&dev->stream_vdev.vir_cpy.work);
1086 ret = 0;
1087 } else {
1088 v4l2_err(&dev->v4l2_dev,
1089 "no stream enable for iqtool\n");
1090 destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
1091 ret = -EINVAL;
1092 }
1093 return ret;
1094 }
1095
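/* first stream fed from DDR: raise the core clock before starting */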
1096 if (dev->inp == INP_DDR &&
1097 !atomic_read(&hw->refcnt) &&
1098 !atomic_read(&dev->stream_vdev.refcnt) &&
1099 clk_get_rate(hw->clks[0]) <= hw->core_clk_min) {
1100 dev->hw_dev->is_first = false;
1101 rkispp_set_clk_rate(hw->clks[0], hw->core_clk_max);
1102 }
1103
1104 stream->is_upd = false;
1105 stream->is_cfg = false;
1106 atomic_inc(&dev->stream_vdev.refcnt);
1107 if (!dev->inp || !stream->linked) {
1108 v4l2_err(&dev->v4l2_dev,
1109 "no link or invalid input source\n");
1110 goto free_buf_queue;
1111 }
1112
1113 ret = rkispp_alloc_common_dummy_buf(stream->isppdev);
1114 if (ret < 0)
1115 goto free_buf_queue;
1116
1117 if (dev->inp == INP_ISP) {
1118 if (dev->ispp_ver == ISPP_V10)
1119 dev->stream_vdev.module_ens |= ISPP_MODULE_NR;
1120 else if (dev->ispp_ver == ISPP_V20)
1121 dev->stream_vdev.module_ens = ISPP_MODULE_FEC;
1122 }
1123
1124 if (stream->ops && stream->ops->config) {
1125 ret = stream->ops->config(stream);
1126 if (ret < 0)
1127 goto free_dummy_buf;
1128 }
1129
1130 /* start from ddr */
1131 if (stream->ops && stream->ops->start)
1132 stream->ops->start(stream);
1133
1134 stream->streaming = true;
1135
1136 /* start from isp */
1137 ret = vdev->stream_ops->start_isp(dev);
1138 if (ret)
1139 goto free_dummy_buf;
1140
1141 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1142 "%s id:%d exit\n", __func__, stream->id);
1143 return 0;
1144 free_dummy_buf:
1145 rkispp_free_common_dummy_buf(stream->isppdev);
1146 free_buf_queue:
1147 destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
1148 vdev->stream_ops->destroy_buf(stream);
1149 atomic_dec(&dev->stream_vdev.refcnt);
1150 stream->streaming = false;
1151 stream->is_upd = false;
1152 v4l2_err(&dev->v4l2_dev, "%s id:%d failed ret:%d\n",
1153 __func__, stream->id, ret);
1154 return ret;
1155 }
1156
1157 static struct vb2_ops stream_vb2_ops = {
1158 .queue_setup = rkispp_queue_setup,
1159 .buf_queue = rkispp_buf_queue,
1160 .wait_prepare = vb2_ops_wait_prepare,
1161 .wait_finish = vb2_ops_wait_finish,
1162 .stop_streaming = rkispp_stop_streaming,
1163 .start_streaming = rkispp_start_streaming,
1164 };
1165
1166 static int rkispp_init_vb2_queue(struct vb2_queue *q,
1167 struct rkispp_stream *stream,
1168 enum v4l2_buf_type buf_type)
1169 {
1170 q->type = buf_type;
1171 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
1172 q->drv_priv = stream;
1173 q->ops = &stream_vb2_ops;
1174 q->mem_ops = stream->isppdev->hw_dev->mem_ops;
1175 q->buf_struct_size = sizeof(struct rkispp_buffer);
1176 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1177 q->min_buffers_needed = STREAM_IN_REQ_BUFS_MIN;
1178 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1179 } else {
1180 q->min_buffers_needed = STREAM_OUT_REQ_BUFS_MIN;
1181 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1182 }
1183 q->lock = &stream->isppdev->apilock;
1184 q->dev = stream->isppdev->hw_dev->dev;
1185 q->allow_cache_hints = 1;
1186 q->bidirectional = 1;
1187 if (stream->isppdev->hw_dev->is_dma_contig)
1188 q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
1189 q->gfp_flags = GFP_DMA32;
1190 return vb2_queue_init(q);
1191 }
1192
1193 static int rkispp_set_fmt(struct rkispp_stream *stream,
1194 struct v4l2_pix_format_mplane *pixm,
1195 bool try)
1196 {
1197 struct rkispp_device *dev = stream->isppdev;
1198 struct rkispp_subdev *sdev = &dev->ispp_sdev;
1199 const struct capture_fmt *fmt;
1200 unsigned int imagsize = 0;
1201 unsigned int planes;
1202 u32 xsubs = 1, ysubs = 1;
1203 unsigned int i;
1204
1205 if (stream->id == STREAM_VIR) {
1206 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1207 struct rkispp_stream *t = &dev->stream_vdev.stream[i];
1208
1209 if (t->out_cap_fmt.wr_fmt & FMT_FBC || !t->streaming)
1210 continue;
1211 if (t->out_fmt.plane_fmt[0].sizeimage > imagsize) {
1212 imagsize = t->out_fmt.plane_fmt[0].sizeimage;
1213 *pixm = t->out_fmt;
1214 stream->conn_id = t->id;
1215 }
1216 }
1217 if (!imagsize) {
1218 v4l2_err(&dev->v4l2_dev, "no output stream for iqtool\n");
1219 return -EINVAL;
1220 }
1221 imagsize = 0;
1222 }
1223
1224 fmt = find_fmt(stream, pixm->pixelformat);
1225 if (!fmt) {
1226 v4l2_err(&dev->v4l2_dev,
1227 "nonsupport pixelformat:%c%c%c%c\n",
1228 pixm->pixelformat,
1229 pixm->pixelformat >> 8,
1230 pixm->pixelformat >> 16,
1231 pixm->pixelformat >> 24);
1232 return -EINVAL;
1233 }
1234
1235 pixm->num_planes = fmt->mplanes;
1236 pixm->field = V4L2_FIELD_NONE;
1237 if (!pixm->quantization)
1238 pixm->quantization = V4L2_QUANTIZATION_FULL_RANGE;
1239
1240 /* calculate size */
1241 fcc_xysubs(fmt->fourcc, &xsubs, &ysubs);
1242 planes = fmt->cplanes ? fmt->cplanes : fmt->mplanes;
1243 for (i = 0; i < planes; i++) {
1244 struct v4l2_plane_pix_format *plane_fmt;
1245 unsigned int width, height, bytesperline, w, h;
1246
1247 plane_fmt = pixm->plane_fmt + i;
1248
1249 if (pixm->width == RKISPP_MAX_WIDTH_V20) {
1250 w = ALIGN(pixm->width, 16);
1251 h = ALIGN(pixm->height, 16);
1252 } else {
1253 w = (fmt->wr_fmt & FMT_FBC) ?
1254 ALIGN(pixm->width, 16) : pixm->width;
1255 h = (fmt->wr_fmt & FMT_FBC) ?
1256 ALIGN(pixm->height, 16) : pixm->height;
1257 }
1258
1259 width = i ? w / xsubs : w;
1260 height = i ? h / ysubs : h;
1261
1262 bytesperline = width * DIV_ROUND_UP(fmt->bpp[i], 8);
1263
1264 if (i != 0 || plane_fmt->bytesperline < bytesperline)
1265 plane_fmt->bytesperline = bytesperline;
1266
1267 plane_fmt->sizeimage = plane_fmt->bytesperline * height;
1268 /* FBC header: width * height / 16, and 4096 align for mpp
1269 * FBC payload: yuv420 or yuv422 size
1270 * FBC width and height need 16 align
1271 */
1272 if (fmt->wr_fmt & FMT_FBC && i == 0)
1273 plane_fmt->sizeimage =
1274 ALIGN(plane_fmt->sizeimage >> 4, RK_MPP_ALIGN);
1275 else if (fmt->wr_fmt & FMT_FBC)
1276 plane_fmt->sizeimage += w * h;
1277 imagsize += plane_fmt->sizeimage;
1278 }
1279
1280 if (fmt->mplanes == 1)
1281 pixm->plane_fmt[0].sizeimage = imagsize;
1282
1283 stream->is_reg_withstream = rkispp_is_reg_withstream_local(&stream->vnode.vdev.dev);
1284 if (stream->is_reg_withstream &&
1285 (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420))
1286 pixm->num_planes++;
1287
1288 if (!try) {
1289 stream->out_cap_fmt = *fmt;
1290 stream->out_fmt = *pixm;
1291
1292 if (stream->id == STREAM_II && stream->linked) {
1293 sdev->in_fmt.width = pixm->width;
1294 sdev->in_fmt.height = pixm->height;
1295 sdev->out_fmt.width = pixm->width;
1296 sdev->out_fmt.height = pixm->height;
1297 }
1298 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1299 "%s: stream: %d req(%d, %d) out(%d, %d)\n",
1300 __func__, stream->id, pixm->width, pixm->height,
1301 stream->out_fmt.width, stream->out_fmt.height);
1302
1303 if (dev->ispp_ver == ISPP_V10) {
1304 if (sdev->out_fmt.width > RKISPP_MAX_WIDTH_V10 ||
1305 sdev->out_fmt.height > RKISPP_MAX_HEIGHT_V10 ||
1306 sdev->out_fmt.width < RKISPP_MIN_WIDTH_V10 ||
1307 sdev->out_fmt.height < RKISPP_MIN_HEIGHT_V10) {
1308 v4l2_err(&dev->v4l2_dev,
1309 "ispp input min:%dx%d max:%dx%d\n",
1310 RKISPP_MIN_WIDTH_V10, RKISPP_MIN_HEIGHT_V10,
1311 RKISPP_MAX_WIDTH_V10, RKISPP_MAX_HEIGHT_V10);
1312 stream->out_fmt.width = 0;
1313 stream->out_fmt.height = 0;
1314 return -EINVAL;
1315 }
1316 } else if (dev->ispp_ver == ISPP_V20) {
1317 if (sdev->out_fmt.width > RKISPP_MAX_WIDTH_V20 ||
1318 sdev->out_fmt.height > RKISPP_MAX_HEIGHT_V20 ||
1319 sdev->out_fmt.width < RKISPP_MIN_WIDTH_V20 ||
1320 sdev->out_fmt.height < RKISPP_MIN_HEIGHT_V20) {
1321 v4l2_err(&dev->v4l2_dev,
1322 "ispp input min:%dx%d max:%dx%d\n",
1323 RKISPP_MIN_WIDTH_V20, RKISPP_MIN_HEIGHT_V20,
1324 RKISPP_MAX_WIDTH_V20, RKISPP_MAX_HEIGHT_V20);
1325 stream->out_fmt.width = 0;
1326 stream->out_fmt.height = 0;
1327 return -EINVAL;
1328 }
1329 }
1330 }
1331
1332 if (stream->ops && stream->ops->limit_check)
1333 return stream->ops->limit_check(stream, try ? pixm : NULL);
1334
1335 return 0;
1336 }
1337
1338 /************************* v4l2_file_operations***************************/
1339
1340 static int rkispp_fh_open(struct file *filp)
1341 {
1342 struct rkispp_stream *stream = video_drvdata(filp);
1343 struct rkispp_device *isppdev = stream->isppdev;
1344 int ret;
1345
1346 ret = v4l2_fh_open(filp);
1347 if (!ret) {
1348 ret = v4l2_pipeline_pm_get(&stream->vnode.vdev.entity);
1349 if (ret < 0) {
1350 v4l2_err(&isppdev->v4l2_dev,
1351 "pipeline power on failed %d\n", ret);
1352 vb2_fop_release(filp);
1353 }
1354 }
1355 return ret;
1356 }
1357
1358 static int rkispp_fh_release(struct file *filp)
1359 {
1360 struct rkispp_stream *stream = video_drvdata(filp);
1361 int ret;
1362
1363 ret = vb2_fop_release(filp);
1364 if (!ret)
1365 v4l2_pipeline_pm_put(&stream->vnode.vdev.entity);
1366 return ret;
1367 }
1368
1369 static const struct v4l2_file_operations rkispp_fops = {
1370 .open = rkispp_fh_open,
1371 .release = rkispp_fh_release,
1372 .unlocked_ioctl = video_ioctl2,
1373 .poll = vb2_fop_poll,
1374 .mmap = vb2_fop_mmap,
1375 };
1376
1377 static int rkispp_enum_input(struct file *file, void *priv,
1378 struct v4l2_input *input)
1379 {
1380 if (input->index > 0)
1381 return -EINVAL;
1382
1383 input->type = V4L2_INPUT_TYPE_CAMERA;
1384 strscpy(input->name, "Camera", sizeof(input->name));
1385
1386 return 0;
1387 }
1388
1389 static int rkispp_try_fmt_vid_mplane(struct file *file, void *fh,
1390 struct v4l2_format *f)
1391 {
1392 struct rkispp_stream *stream = video_drvdata(file);
1393
1394 return rkispp_set_fmt(stream, &f->fmt.pix_mp, true);
1395 }
1396
1397 static int rkispp_enum_fmt_vid_mplane(struct file *file, void *priv,
1398 struct v4l2_fmtdesc *f)
1399 {
1400 struct rkispp_stream *stream = video_drvdata(file);
1401 const struct capture_fmt *fmt = NULL;
1402
1403 if (f->index >= stream->config->fmt_size)
1404 return -EINVAL;
1405
1406 fmt = &stream->config->fmts[f->index];
1407 f->pixelformat = fmt->fourcc;
1408
1409 return 0;
1410 }
1411
1412 static int rkispp_s_fmt_vid_mplane(struct file *file,
1413 void *priv, struct v4l2_format *f)
1414 {
1415 struct rkispp_stream *stream = video_drvdata(file);
1416 struct video_device *vdev = &stream->vnode.vdev;
1417 struct rkispp_vdev_node *node = vdev_to_node(vdev);
1418 struct rkispp_device *dev = stream->isppdev;
1419
1420 /* Change not allowed if queue is streaming. */
1421 if (vb2_is_streaming(&node->buf_queue)) {
1422 v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
1423 return -EBUSY;
1424 }
1425
1426 return rkispp_set_fmt(stream, &f->fmt.pix_mp, false);
1427 }
1428
1429 static int rkispp_g_fmt_vid_mplane(struct file *file, void *fh,
1430 struct v4l2_format *f)
1431 {
1432 struct rkispp_stream *stream = video_drvdata(file);
1433
1434 f->fmt.pix_mp = stream->out_fmt;
1435
1436 return 0;
1437 }
1438
1439 static int rkispp_querycap(struct file *file, void *priv,
1440 struct v4l2_capability *cap)
1441 {
1442 struct rkispp_stream *stream = video_drvdata(file);
1443 struct device *dev = stream->isppdev->dev;
1444 struct video_device *vdev = video_devdata(file);
1445
1446 strlcpy(cap->card, vdev->name, sizeof(cap->card));
1447 snprintf(cap->driver, sizeof(cap->driver),
1448 "%s_v%d", dev->driver->name,
1449 stream->isppdev->ispp_ver >> 4);
1450 snprintf(cap->bus_info, sizeof(cap->bus_info),
1451 "platform:%s", dev_name(dev));
1452
1453 return 0;
1454 }
1455
1456 static const struct v4l2_ioctl_ops rkispp_v4l2_ioctl_ops = {
1457 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1458 .vidioc_querybuf = vb2_ioctl_querybuf,
1459 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1460 .vidioc_qbuf = vb2_ioctl_qbuf,
1461 .vidioc_expbuf = vb2_ioctl_expbuf,
1462 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1463 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1464 .vidioc_streamon = vb2_ioctl_streamon,
1465 .vidioc_streamoff = vb2_ioctl_streamoff,
1466 .vidioc_enum_input = rkispp_enum_input,
1467 .vidioc_try_fmt_vid_cap_mplane = rkispp_try_fmt_vid_mplane,
1468 .vidioc_enum_fmt_vid_cap = rkispp_enum_fmt_vid_mplane,
1469 .vidioc_s_fmt_vid_cap_mplane = rkispp_s_fmt_vid_mplane,
1470 .vidioc_g_fmt_vid_cap_mplane = rkispp_g_fmt_vid_mplane,
1471 .vidioc_try_fmt_vid_out_mplane = rkispp_try_fmt_vid_mplane,
1472 .vidioc_s_fmt_vid_out_mplane = rkispp_s_fmt_vid_mplane,
1473 .vidioc_g_fmt_vid_out_mplane = rkispp_g_fmt_vid_mplane,
1474 .vidioc_querycap = rkispp_querycap,
1475 };
1476
1477 static void rkispp_unregister_stream_video(struct rkispp_stream *stream)
1478 {
1479 media_entity_cleanup(&stream->vnode.vdev.entity);
1480 video_unregister_device(&stream->vnode.vdev);
1481 }
1482
1483 static int rkispp_register_stream_video(struct rkispp_stream *stream)
1484 {
1485 struct rkispp_device *dev = stream->isppdev;
1486 struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1487 struct video_device *vdev = &stream->vnode.vdev;
1488 struct rkispp_vdev_node *node;
1489 enum v4l2_buf_type buf_type;
1490 int ret = 0;
1491
1492 node = vdev_to_node(vdev);
1493 vdev->release = video_device_release_empty;
1494 vdev->fops = &rkispp_fops;
1495 vdev->minor = -1;
1496 vdev->v4l2_dev = v4l2_dev;
1497 vdev->lock = &dev->apilock;
1498 video_set_drvdata(vdev, stream);
1499
1500 vdev->ioctl_ops = &rkispp_v4l2_ioctl_ops;
1501 if (stream->type == STREAM_INPUT) {
1502 vdev->device_caps = V4L2_CAP_STREAMING |
1503 V4L2_CAP_VIDEO_OUTPUT_MPLANE;
1504 vdev->vfl_dir = VFL_DIR_TX;
1505 node->pad.flags = MEDIA_PAD_FL_SOURCE;
1506 buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1507 } else {
1508 vdev->device_caps = V4L2_CAP_STREAMING |
1509 V4L2_CAP_VIDEO_CAPTURE_MPLANE;
1510 vdev->vfl_dir = VFL_DIR_RX;
1511 node->pad.flags = MEDIA_PAD_FL_SINK;
1512 buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1513 }
1514
1515 rkispp_init_vb2_queue(&node->buf_queue, stream, buf_type);
1516 vdev->queue = &node->buf_queue;
1517
1518 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1519 if (ret < 0) {
1520 v4l2_err(v4l2_dev,
1521 "video register failed with error %d\n", ret);
1522 return ret;
1523 }
1524
1525 ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
1526 if (ret < 0)
1527 goto unreg;
1528 return 0;
1529 unreg:
1530 video_unregister_device(vdev);
1531 return ret;
1532 }
1533
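/* debug helper: when rkispp_dump_path is set, dump the ispp register space
 * and the TNR input/IIR buffers to files before a module restart
 */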
1534 static void dump_file(struct rkispp_device *dev, u32 restart_module)
1535 {
1536 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1537 void __iomem *base = dev->hw_dev->base_addr;
1538 struct rkispp_isp_buf_pool *buf;
1539 struct rkispp_dummy_buffer *dummy;
1540 struct file *fp = NULL;
1541 char file[160], reg[48];
1542 int i;
1543
1544 snprintf(file, sizeof(file), "%s/%s%d.reg",
1545 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1546 fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1547 if (IS_ERR(fp)) {
1548 v4l2_err(&dev->v4l2_dev, "%s open %s fail\n", __func__, file);
1549 return;
1550 }
1551 for (i = 0; i < 0x1000; i += 16) {
1552 snprintf(reg, sizeof(reg), "ffb6%04x: %08x %08x %08x %08x\n",
1553 i, readl(base + i), readl(base + i + 4),
1554 readl(base + i + 8), readl(base + i + 12));
1555 kernel_write(fp, reg, strlen(reg), &fp->f_pos);
1556 }
1557 filp_close(fp, NULL);
1558
1559 if (restart_module & MONITOR_TNR) {
1560 if (vdev->tnr.cur_rd) {
1561 snprintf(file, sizeof(file), "%s/%s%d_tnr_cur.fbc",
1562 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1563 fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1564 if (IS_ERR(fp)) {
1565 v4l2_err(&dev->v4l2_dev,
1566 "%s open %s fail\n", __func__, file);
1567 return;
1568 }
1569 buf = get_pool_buf(dev, vdev->tnr.cur_rd);
1570 kernel_write(fp, buf->vaddr[0], vdev->tnr.cur_rd->dbuf[0]->size, &fp->f_pos);
1571 filp_close(fp, NULL);
1572 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1573 "dump tnr cur_rd dma:%pad vaddr:%p\n",
1574 &buf->dma[0], buf->vaddr[0]);
1575 }
1576
1577 if (vdev->tnr.nxt_rd && vdev->tnr.nxt_rd != vdev->tnr.cur_rd) {
1578 snprintf(file, sizeof(file), "%s/%s%d_tnr_nxt.fbc",
1579 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1580 fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1581 if (IS_ERR(fp)) {
1582 v4l2_err(&dev->v4l2_dev,
1583 "%s open %s fail\n", __func__, file);
1584 return;
1585 }
1586 buf = get_pool_buf(dev, vdev->tnr.nxt_rd);
1587 kernel_write(fp, buf->vaddr[0], vdev->tnr.nxt_rd->dbuf[0]->size, &fp->f_pos);
1588 filp_close(fp, NULL);
1589 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1590 "dump tnr nxt_rd dma:%pad vaddr:%p\n",
1591 &buf->dma[0], buf->vaddr[0]);
1592 }
1593 }
1594
1595 if (!(restart_module & MONITOR_FEC)) {
1596 for (i = 0; i < RKISPP_BUF_MAX; i++) {
1597 dummy = &vdev->tnr.buf.wr[i][0];
1598 if (!dummy->mem_priv)
1599 break;
1600 snprintf(file, sizeof(file), "%s/%s%d_iir%d.fbc",
1601 rkispp_dump_path, DRIVER_NAME, dev->dev_id, i);
1602 fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1603 if (IS_ERR(fp)) {
1604 v4l2_err(&dev->v4l2_dev,
1605 "%s open %s fail\n", __func__, file);
1606 return;
1607 }
1608 kernel_write(fp, dummy->vaddr, dummy->size, &fp->f_pos);
1609 filp_close(fp, NULL);
1610 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1611 "dump tnr wr dma:%pad vaddr:%p\n",
1612 &dummy->dma_addr, dummy->vaddr);
1613 }
1614 }
1615 }
1616
1617 static void restart_module(struct rkispp_device *dev)
1618 {
1619 struct rkispp_monitor *monitor = &dev->stream_vdev.monitor;
1620 void __iomem *base = dev->hw_dev->base_addr;
1621 u32 val = 0;
1622
1623 monitor->retry++;
1624 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1625 "%s enter\n", __func__);
1626 if (dev->ispp_sdev.state == ISPP_STOP || monitor->retry > 3) {
1627 monitor->is_restart = false;
1628 monitor->is_en = false;
1629 goto end;
1630 }
1631 if (monitor->monitoring_module)
1632 wait_for_completion_timeout(&monitor->cmpl,
1633 msecs_to_jiffies(500));
1634 if (dev->ispp_sdev.state == ISPP_STOP) {
1635 monitor->is_restart = false;
1636 monitor->is_en = false;
1637 goto end;
1638 }
1639
1640 if (rkispp_dump_path[0] == '/')
1641 dump_file(dev, monitor->restart_module);
1642
1643 if (monitor->restart_module & MONITOR_TNR && monitor->tnr.is_err) {
1644 rkispp_set_bits(dev, RKISPP_TNR_CTRL, 0, SW_TNR_1ST_FRM);
1645 monitor->tnr.is_err = false;
1646 }
1647 rkispp_soft_reset(dev->hw_dev);
1648 rkispp_update_regs(dev, RKISPP_CTRL_QUICK, RKISPP_FEC_CROP);
1649 writel(ALL_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1650 if (monitor->restart_module & MONITOR_TNR) {
1651 val |= TNR_ST;
1652 rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1653 rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
1654 rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1655 rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
1656 monitor->monitoring_module |= MONITOR_TNR;
1657 if (!completion_done(&monitor->tnr.cmpl))
1658 complete(&monitor->tnr.cmpl);
1659 }
1660 if (monitor->restart_module & MONITOR_NR) {
1661 if (monitor->nr.is_err) {
1662 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1663 struct v4l2_subdev *sd = dev->ispp_sdev.remote_sd;
1664 struct rkispp_buffer *inbuf;
1665
1666 if (vdev->nr.cur_rd) {
1667 if (vdev->nr.cur_rd->is_isp) {
1668 v4l2_subdev_call(sd, video, s_rx_buffer,
1669 vdev->nr.cur_rd, NULL);
1670 } else if (!vdev->nr.cur_rd->priv) {
1671 list_add_tail(&vdev->nr.cur_rd->list,
1672 &vdev->tnr.list_wr);
1673 } else {
1674 inbuf = vdev->nr.cur_rd->priv;
1675 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1676 }
1677 vdev->nr.cur_rd = NULL;
1678 }
1679 rkispp_set_bits(dev, RKISPP_TNR_CTRL, 0, SW_TNR_1ST_FRM);
1680 vdev->nr.is_end = true;
1681 monitor->nr.is_err = false;
1682 monitor->is_restart = false;
1683 monitor->restart_module = 0;
1684 rkispp_event_handle(dev, CMD_QUEUE_DMABUF, NULL);
1685 goto end;
1686 }
1687 val |= NR_SHP_ST;
1688 monitor->monitoring_module |= MONITOR_NR;
1689 if (!completion_done(&monitor->nr.cmpl))
1690 complete(&monitor->nr.cmpl);
1691 }
1692 if (monitor->restart_module & MONITOR_FEC) {
1693 val |= FEC_ST;
1694 monitor->monitoring_module |= MONITOR_FEC;
1695 if (!completion_done(&monitor->fec.cmpl))
1696 complete(&monitor->fec.cmpl);
1697 }
1698 if (!dev->hw_dev->is_shutdown)
1699 writel(val, base + RKISPP_CTRL_STRT);
1700 monitor->is_restart = false;
1701 monitor->restart_module = 0;
1702 end:
1703 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1704 "%s exit en:%d cnt:%d, monitoring:0x%x\n", __func__,
1705 monitor->is_en, monitor->retry, monitor->monitoring_module);
1706 }
1707
1708 static void restart_monitor(struct work_struct *work)
1709 {
1710 struct module_monitor *m_monitor =
1711 container_of(work, struct module_monitor, work);
1712 struct rkispp_device *dev = m_monitor->dev;
1713 struct rkispp_monitor *monitor = &dev->stream_vdev.monitor;
1714 unsigned long lock_flags = 0;
1715 long time;
1716 int ret;
1717
1718 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1719 "%s module:0x%x enter\n", __func__, m_monitor->module);
1720 while (monitor->is_en) {
1721 /* max timeout for module idle */
1722 time = MAX_SCHEDULE_TIMEOUT;
1723 if (monitor->monitoring_module & m_monitor->module)
1724 time = (m_monitor->time <= 0 ? 300 : m_monitor->time) + 150;
1725 ret = wait_for_completion_timeout(&m_monitor->cmpl,
1726 msecs_to_jiffies(time));
1727 if (dev->hw_dev->is_shutdown || dev->ispp_sdev.state == ISPP_STOP)
1728 break;
1729 if (!(monitor->monitoring_module & m_monitor->module) ||
1730 ret || !monitor->is_en)
1731 continue;
1732 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1733 "module:0x%x wait %ldms timeout ret:%d, monitoring:0x%x\n",
1734 m_monitor->module, time, ret, monitor->monitoring_module);
1735
1736 spin_lock_irqsave(&monitor->lock, lock_flags);
1737 monitor->monitoring_module &= ~m_monitor->module;
1738 monitor->restart_module |= m_monitor->module;
1739 if (monitor->is_restart)
1740 ret = true;
1741 else
1742 monitor->is_restart = true;
1743 if (m_monitor->module == MONITOR_TNR) {
1744 rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1745 readl(dev->hw_dev->base_addr + RKISPP_TNR_IIR_Y_BASE_SHD));
1746 rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1747 readl(dev->hw_dev->base_addr + RKISPP_TNR_IIR_UV_BASE_SHD));
1748 }
1749 spin_unlock_irqrestore(&monitor->lock, lock_flags);
1750 if (!ret && monitor->is_restart)
1751 restart_module(dev);
1752 /* waitting for other working module if need restart ispp */
1753 if (monitor->is_restart &&
1754 !monitor->monitoring_module &&
1755 !completion_done(&monitor->cmpl))
1756 complete(&monitor->cmpl);
1757 }
1758 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1759 "%s module:0x%x exit\n", __func__, m_monitor->module);
1760 }
1761
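/* Set up the TNR/NR/FEC hang monitors: work items, completions and lock. */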
static void monitor_init(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkispp_monitor *monitor = &vdev->monitor;

	monitor->tnr.dev = dev;
	monitor->nr.dev = dev;
	monitor->fec.dev = dev;
	monitor->tnr.module = MONITOR_TNR;
	monitor->nr.module = MONITOR_NR;
	monitor->fec.module = MONITOR_FEC;
	INIT_WORK(&monitor->tnr.work, restart_monitor);
	INIT_WORK(&monitor->nr.work, restart_monitor);
	INIT_WORK(&monitor->fec.work, restart_monitor);
	init_completion(&monitor->tnr.cmpl);
	init_completion(&monitor->nr.cmpl);
	init_completion(&monitor->fec.cmpl);
	init_completion(&monitor->cmpl);
	spin_lock_init(&monitor->lock);
	monitor->is_restart = false;
}

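/*
 * hrtimer callback used to start FEC early: poll the NR line counter and
 * trigger fec_work_event() once NR reports line progress or has gone idle,
 * otherwise re-arm the timer for another 500us.
 */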
static enum hrtimer_restart rkispp_fec_do_early(struct hrtimer *timer)
{
	struct rkispp_stream_vdev *vdev =
		container_of(timer, struct rkispp_stream_vdev, fec_qst);
	struct rkispp_stream *stream = &vdev->stream[0];
	struct rkispp_device *dev = stream->isppdev;
	void __iomem *base = dev->hw_dev->base_addr;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	u32 ycnt, tile = readl(base + RKISPP_CTRL_SYS_CTL_STA0);
	u32 working = readl(base + RKISPP_CTRL_SYS_STATUS);
	u64 ns = ktime_get_ns();
	u32 time;

	working &= NR_WORKING;
	tile &= NR_TILE_LINE_CNT_MASK;
	ycnt = tile >> 8;
	time = (u32)(ns - vdev->nr.dbg.timestamp);
	if (dev->ispp_sdev.state == ISPP_STOP) {
		vdev->is_done_early = false;
		goto end;
	} else if (working && !ycnt) {
		hrtimer_forward(timer, timer->base->get_time(), ns_to_ktime(500000));
		ret = HRTIMER_RESTART;
	} else {
		v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
			 "%s seq:%d ycnt:%d time:%dus\n",
			 __func__, vdev->nr.dbg.id, ycnt * 128, time / 1000);
		vdev->stream_ops->fec_work_event(dev, NULL, false, true);
	}
end:
	return ret;
}

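/*
 * Main ISPP interrupt handler: latches TNR/NR error flags for the monitor,
 * clears the per-module monitoring bits on frame-end interrupts, updates
 * debug timing, signals frame end for the output streams and hands the
 * remaining work to check_to_force_update() (or to a workqueue for the
 * first frame).
 */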
void rkispp_isr(u32 mis_val, struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream;
	u32 i, nr_err = NR_LOST_ERR | FBCH_EMPTY_NR |
		FBCD_DEC_ERR_NR | BUS_ERR_NR;
	u32 tnr_err = TNR_LOST_ERR | FBCH_EMPTY_TNR |
		FBCD_DEC_ERR_TNR | BUS_ERR_TNR;
	u64 ns = ktime_get_ns();

	v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
		 "isr:0x%x\n", mis_val);

	vdev = &dev->stream_vdev;
	dev->isr_cnt++;
	if (mis_val & (tnr_err | nr_err)) {
		if (mis_val & tnr_err)
			vdev->monitor.tnr.is_err = true;
		if (mis_val & nr_err)
			vdev->monitor.nr.is_err = true;
		dev->isr_err_cnt++;
		v4l2_err(&dev->v4l2_dev,
			 "ispp err:0x%x, seq:%d\n",
			 mis_val, dev->ispp_sdev.frm_sync_seq);
	}

	if (mis_val & TNR_INT) {
		if (vdev->monitor.is_en) {
			vdev->monitor.monitoring_module &= ~MONITOR_TNR;
			if (!completion_done(&vdev->monitor.tnr.cmpl))
				complete(&vdev->monitor.tnr.cmpl);
		}
		vdev->tnr.dbg.interval = ns - vdev->tnr.dbg.timestamp;
	}
	if (mis_val & NR_INT) {
		if (vdev->monitor.is_en) {
			vdev->monitor.monitoring_module &= ~MONITOR_NR;
			if (!completion_done(&vdev->monitor.nr.cmpl))
				complete(&vdev->monitor.nr.cmpl);
		}
		vdev->nr.dbg.interval = ns - vdev->nr.dbg.timestamp;
	}
	if (mis_val & FEC_INT) {
		if (vdev->monitor.is_en) {
			vdev->monitor.monitoring_module &= ~MONITOR_FEC;
			if (!completion_done(&vdev->monitor.fec.cmpl))
				complete(&vdev->monitor.fec.cmpl);
		}
		vdev->fec.dbg.interval = ns - vdev->fec.dbg.timestamp;
	}

	if (mis_val & (CMD_TNR_ST_DONE | CMD_NR_SHP_ST_DONE) &&
	    (dev->isp_mode & ISP_ISPP_QUICK))
		++dev->ispp_sdev.frm_sync_seq;

	if (mis_val & TNR_INT)
		if (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM)
			rkispp_clear_bits(dev, RKISPP_TNR_CTRL, SW_TNR_1ST_FRM);

	rkispp_stats_isr(&dev->stats_vdev, mis_val);

	for (i = 0; i <= STREAM_S2; i++) {
		stream = &vdev->stream[i];

		if (!stream->streaming || !stream->is_cfg ||
		    !(mis_val & INT_FRAME(stream)))
			continue;
		if (stream->stopping &&
		    stream->ops->is_stopped &&
		    (stream->ops->is_stopped(stream) ||
		     dev->ispp_sdev.state == ISPP_STOP)) {
			stream->stopping = false;
			stream->streaming = false;
			stream->is_upd = false;
			wake_up(&stream->done);
		} else if (i != STREAM_II) {
			rkispp_frame_end(stream, FRAME_IRQ);
		}
	}

	if ((mis_val & NR_INT || mis_val & FEC_INT) && dev->hw_dev->is_first) {
		dev->mis_val = mis_val;
		INIT_WORK(&dev->irq_work, irq_work);
		schedule_work(&dev->irq_work);
	} else {
		vdev->stream_ops->check_to_force_update(dev, mis_val);
	}
}

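/*
 * Register the video devices for all ISPP streams (input, MB and scaler
 * outputs, plus the virtual stream when present) and initialise the buffer
 * lists, locks and the hang monitor.
 */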
int rkispp_register_stream_vdevs(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *stream_vdev;
	struct rkispp_stream *stream;
	struct video_device *vdev;
	char *vdev_name;
	int i, j, ret = 0;

	stream_vdev = &dev->stream_vdev;
	memset(stream_vdev, 0, sizeof(*stream_vdev));
	atomic_set(&stream_vdev->refcnt, 0);
	INIT_LIST_HEAD(&stream_vdev->tnr.list_rd);
	INIT_LIST_HEAD(&stream_vdev->tnr.list_wr);
	INIT_LIST_HEAD(&stream_vdev->tnr.list_rpt);
	INIT_LIST_HEAD(&stream_vdev->nr.list_rd);
	INIT_LIST_HEAD(&stream_vdev->nr.list_wr);
	INIT_LIST_HEAD(&stream_vdev->fec.list_rd);
	spin_lock_init(&stream_vdev->tnr.buf_lock);
	spin_lock_init(&stream_vdev->nr.buf_lock);
	spin_lock_init(&stream_vdev->fec.buf_lock);
	stream_vdev->tnr.is_but_init = false;

	if (dev->ispp_ver == ISPP_V10) {
		dev->stream_max = STREAM_MAX;
		rkispp_stream_init_ops_v10(stream_vdev);
		hrtimer_init(&stream_vdev->fec_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		stream_vdev->fec_qst.function = rkispp_fec_do_early;
		hrtimer_init(&stream_vdev->frame_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		stream_vdev->frame_qst.function = stream_vdev->stream_ops->rkispp_frame_done_early;
		dev->hw_dev->pool[0].group_buf_max = GROUP_BUF_MAX;
	} else if (dev->ispp_ver == ISPP_V20) {
		dev->stream_max = STREAM_VIR + 1;
		rkispp_stream_init_ops_v20(stream_vdev);
		dev->hw_dev->pool[0].group_buf_max = GROUP_BUF_GAIN;
	}
	for (i = 0; i < dev->stream_max; i++) {
		stream = &stream_vdev->stream[i];
		stream->id = i;
		stream->isppdev = dev;
		INIT_LIST_HEAD(&stream->buf_queue);
		init_waitqueue_head(&stream->done);
		spin_lock_init(&stream->vbq_lock);
		vdev = &stream->vnode.vdev;
		switch (i) {
		case STREAM_II:
			vdev_name = II_VDEV_NAME;
			stream->type = STREAM_INPUT;
			stream->ops = &input_stream_ops;
			stream->config = &input_config;
			break;
		case STREAM_MB:
			vdev_name = MB_VDEV_NAME;
			stream->type = STREAM_OUTPUT;
			stream->ops = &mb_stream_ops;
			stream->config = &mb_config;
			break;
		case STREAM_S0:
			vdev_name = S0_VDEV_NAME;
			stream->type = STREAM_OUTPUT;
			stream->ops = &scal_stream_ops;
			stream->config = &scl0_config;
			break;
		case STREAM_S1:
			vdev_name = S1_VDEV_NAME;
			stream->type = STREAM_OUTPUT;
			stream->ops = &scal_stream_ops;
			stream->config = &scl1_config;
			break;
		case STREAM_S2:
			vdev_name = S2_VDEV_NAME;
			stream->type = STREAM_OUTPUT;
			stream->ops = &scal_stream_ops;
			stream->config = &scl2_config;
			break;
		case STREAM_VIR:
			vdev_name = VIR_VDEV_NAME;
			stream->type = STREAM_OUTPUT;
			stream->config = &input_config;
			stream->ops = NULL;
			break;
		default:
			v4l2_err(&dev->v4l2_dev, "Invalid stream:%d\n", i);
			return -EINVAL;
		}
		strlcpy(vdev->name, vdev_name, sizeof(vdev->name));
		ret = rkispp_register_stream_video(stream);
		if (ret < 0)
			goto err;
	}
	monitor_init(dev);
	return 0;
err:
	for (j = 0; j < i; j++) {
		stream = &stream_vdev->stream[j];
		rkispp_unregister_stream_video(stream);
	}
	return ret;
}

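/* Unregister the video devices of all ISPP streams. */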
void rkispp_unregister_stream_vdevs(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *stream_vdev;
	struct rkispp_stream *stream;
	int i;

	stream_vdev = &dev->stream_vdev;
	for (i = 0; i < dev->stream_max; i++) {
		stream = &stream_vdev->stream[i];
		rkispp_unregister_stream_video(stream);
	}
}