1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Rockchip Electronics Co., Ltd. */
3 
4 #include <linux/delay.h>
5 #include <linux/pm_runtime.h>
6 #include <media/v4l2-common.h>
7 #include <media/v4l2-event.h>
8 #include <media/v4l2-fh.h>
9 #include <media/v4l2-ioctl.h>
10 #include <media/v4l2-subdev.h>
11 #include <media/videobuf2-dma-contig.h>
12 #include <media/videobuf2-dma-sg.h>
13 #include "dev.h"
14 #include "regs.h"
15 
16 #define CIF_ISP_REQ_BUFS_MIN			0
17 
18 static int mi_frame_end(struct rkisp_stream *stream);
19 static void rkisp_buf_queue(struct vb2_buffer *vb);
20 
21 static const struct capture_fmt dmatx_fmts[] = {
22 	/* raw */
23 	{
24 		.fourcc = V4L2_PIX_FMT_SRGGB8,
25 		.fmt_type = FMT_BAYER,
26 		.bpp = { 8 },
27 		.mplanes = 1,
28 	}, {
29 		.fourcc = V4L2_PIX_FMT_SGRBG8,
30 		.fmt_type = FMT_BAYER,
31 		.bpp = { 8 },
32 		.mplanes = 1,
33 	}, {
34 		.fourcc = V4L2_PIX_FMT_SGBRG8,
35 		.fmt_type = FMT_BAYER,
36 		.bpp = { 8 },
37 		.mplanes = 1,
38 	}, {
39 		.fourcc = V4L2_PIX_FMT_SBGGR8,
40 		.fmt_type = FMT_BAYER,
41 		.bpp = { 8 },
42 		.mplanes = 1,
43 	}, {
44 		.fourcc = V4L2_PIX_FMT_GREY,
45 		.fmt_type = FMT_BAYER,
46 		.bpp = { 8 },
47 		.mplanes = 1,
48 	}, {
49 		.fourcc = V4L2_PIX_FMT_SRGGB10,
50 		.fmt_type = FMT_BAYER,
51 		.bpp = { 10 },
52 		.mplanes = 1,
53 	}, {
54 		.fourcc = V4L2_PIX_FMT_SGRBG10,
55 		.fmt_type = FMT_BAYER,
56 		.bpp = { 10 },
57 		.mplanes = 1,
58 	}, {
59 		.fourcc = V4L2_PIX_FMT_SGBRG10,
60 		.fmt_type = FMT_BAYER,
61 		.bpp = { 10 },
62 		.mplanes = 1,
63 	}, {
64 		.fourcc = V4L2_PIX_FMT_SBGGR10,
65 		.fmt_type = FMT_BAYER,
66 		.bpp = { 10 },
67 		.mplanes = 1,
68 	}, {
69 		.fourcc = V4L2_PIX_FMT_Y10,
70 		.fmt_type = FMT_BAYER,
71 		.bpp = { 10 },
72 		.mplanes = 1,
73 	}, {
74 		.fourcc = V4L2_PIX_FMT_SRGGB12,
75 		.fmt_type = FMT_BAYER,
76 		.bpp = { 12 },
77 		.mplanes = 1,
78 	}, {
79 		.fourcc = V4L2_PIX_FMT_SGRBG12,
80 		.fmt_type = FMT_BAYER,
81 		.bpp = { 12 },
82 		.mplanes = 1,
83 	}, {
84 		.fourcc = V4L2_PIX_FMT_SGBRG12,
85 		.fmt_type = FMT_BAYER,
86 		.bpp = { 12 },
87 		.mplanes = 1,
88 	}, {
89 		.fourcc = V4L2_PIX_FMT_SBGGR12,
90 		.fmt_type = FMT_BAYER,
91 		.bpp = { 12 },
92 		.mplanes = 1,
93 	}, {
94 		.fourcc = V4L2_PIX_FMT_Y12,
95 		.fmt_type = FMT_BAYER,
96 		.bpp = { 12 },
97 		.mplanes = 1,
98 	}, {
99 		.fourcc = V4L2_PIX_FMT_YUYV,
100 		.fmt_type = FMT_YUV,
101 		.bpp = { 16 },
102 		.mplanes = 1,
103 	}, {
104 		.fourcc = V4L2_PIX_FMT_YVYU,
105 		.fmt_type = FMT_YUV,
106 		.bpp = { 16 },
107 		.mplanes = 1,
108 	}, {
109 		.fourcc = V4L2_PIX_FMT_UYVY,
110 		.fmt_type = FMT_YUV,
111 		.bpp = { 16 },
112 		.mplanes = 1,
113 	}, {
114 		.fourcc = V4L2_PIX_FMT_VYUY,
115 		.fmt_type = FMT_YUV,
116 		.bpp = { 16 },
117 		.mplanes = 1,
118 	}, {
119 		.fourcc = V4l2_PIX_FMT_EBD8,
120 		.fmt_type = FMT_EBD,
121 		.bpp = { 8 },
122 		.mplanes = 1,
123 	}, {
124 		.fourcc = V4l2_PIX_FMT_SPD16,
125 		.fmt_type = FMT_SPD,
126 		.bpp = { 16 },
127 		.mplanes = 1,
128 	}
129 };
130 
131 static struct stream_config rkisp2_dmatx0_stream_config = {
132 	.fmts = dmatx_fmts,
133 	.fmt_size = ARRAY_SIZE(dmatx_fmts),
134 	.frame_end_id = MI_RAW0_WR_FRAME,
135 	.mi = {
136 		.y_size_init = MI_RAW0_WR_SIZE,
137 		.y_base_ad_init = MI_RAW0_WR_BASE,
138 		.y_base_ad_shd = MI_RAW0_WR_BASE_SHD,
139 		.length = MI_RAW0_WR_LENGTH,
140 	},
141 	.dma = {
142 		.ctrl = CSI2RX_RAW0_WR_CTRL,
143 		.pic_size = CSI2RX_RAW0_WR_PIC_SIZE,
144 		.pic_offs = CSI2RX_RAW0_WR_PIC_OFF,
145 	},
146 };
147 
148 static struct stream_config rkisp2_dmatx1_stream_config = {
149 	.fmts = dmatx_fmts,
150 	.fmt_size = ARRAY_SIZE(dmatx_fmts),
151 	.frame_end_id = MI_RAW1_WR_FRAME,
152 	.mi = {
153 		.y_size_init = MI_RAW1_WR_SIZE,
154 		.y_base_ad_init = MI_RAW1_WR_BASE,
155 		.y_base_ad_shd = MI_RAW1_WR_BASE_SHD,
156 		.length = MI_RAW1_WR_LENGTH,
157 	},
158 	.dma = {
159 		.ctrl = CSI2RX_RAW1_WR_CTRL,
160 		.pic_size = CSI2RX_RAW1_WR_PIC_SIZE,
161 		.pic_offs = CSI2RX_RAW1_WR_PIC_OFF,
162 	},
163 };
164 
165 static struct stream_config rkisp2_dmatx3_stream_config = {
166 	.fmts = dmatx_fmts,
167 	.fmt_size = ARRAY_SIZE(dmatx_fmts),
168 	.frame_end_id = MI_RAW3_WR_FRAME,
169 	.mi = {
170 		.y_size_init = MI_RAW3_WR_SIZE,
171 		.y_base_ad_init = MI_RAW3_WR_BASE,
172 		.y_base_ad_shd = MI_RAW3_WR_BASE_SHD,
173 		.length = MI_RAW3_WR_LENGTH,
174 	},
175 	.dma = {
176 		.ctrl = CSI2RX_RAW3_WR_CTRL,
177 		.pic_size = CSI2RX_RAW3_WR_PIC_SIZE,
178 		.pic_offs = CSI2RX_RAW3_WR_PIC_OFF,
179 	},
180 };
181 
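/*
 * DMATX2 (and DMATX0 for two-frame HDR) act as read-back streams in the
 * HDR_RDBK_FRAME1/HDR_RDBK_FRAME2 modes.
 */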
182 static bool is_rdbk_stream(struct rkisp_stream *stream)
183 {
184 	struct rkisp_device *dev = stream->ispdev;
185 	bool en = false;
186 
187 	if ((dev->hdr.op_mode == HDR_RDBK_FRAME1 &&
188 	     stream->id == RKISP_STREAM_DMATX2) ||
189 	    (dev->hdr.op_mode == HDR_RDBK_FRAME2 &&
190 	     (stream->id == RKISP_STREAM_DMATX2 ||
191 	      stream->id == RKISP_STREAM_DMATX0)))
192 		en = true;
193 	return en;
194 }
195 
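/* DMATX0 is used as the HDR frame writer in the FRAMEX2/LINEX2 DDR HDR modes */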
196 static bool is_hdr_stream(struct rkisp_stream *stream)
197 {
198 	struct rkisp_device *dev = stream->ispdev;
199 	bool en = false;
200 
201 	if (stream->id == RKISP_STREAM_DMATX0 &&
202 	    (dev->hdr.op_mode == HDR_FRAMEX2_DDR ||
203 	     dev->hdr.op_mode == HDR_LINEX2_DDR))
204 		en = true;
205 	return en;
206 }
207 
208 /* configure dual-crop unit */
209 static int rkisp_stream_config_dcrop(struct rkisp_stream *stream, bool async)
210 {
211 	struct rkisp_device *dev = stream->ispdev;
212 	struct v4l2_rect *dcrop = &stream->dcrop;
213 	struct v4l2_rect *input_win;
214 
215 	/* the dual-crop unit gets its data from the isp */
216 	input_win = rkisp_get_isp_sd_win(&dev->isp_sdev);
217 
218 	if (dcrop->width == input_win->width &&
219 	    dcrop->height == input_win->height &&
220 	    dcrop->left == 0 && dcrop->top == 0) {
221 		rkisp_disable_dcrop(stream, async);
222 		v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
223 			 "stream %d crop disabled\n", stream->id);
224 		return 0;
225 	}
226 
227 	rkisp_config_dcrop(stream, dcrop, async);
228 
229 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
230 		 "stream %d crop: %dx%d -> %dx%d\n", stream->id,
231 		 input_win->width, input_win->height,
232 		 dcrop->width, dcrop->height);
233 
234 	return 0;
235 }
236 
237 /* configure scale unit */
238 static int rkisp_stream_config_rsz(struct rkisp_stream *stream, bool async)
239 {
240 	struct rkisp_device *dev = stream->ispdev;
241 	struct v4l2_pix_format_mplane output_fmt = stream->out_fmt;
242 	struct capture_fmt *output_isp_fmt = &stream->out_isp_fmt;
243 	struct ispsd_out_fmt *input_isp_fmt =
244 			rkisp_get_ispsd_out_fmt(&dev->isp_sdev);
245 	struct v4l2_rect in_y, in_c, out_y, out_c;
246 	u32 xsubs_in = 1, ysubs_in = 1;
247 	u32 xsubs_out = 1, ysubs_out = 1;
248 
249 	if (input_isp_fmt->fmt_type == FMT_BAYER)
250 		goto disable;
251 
252 	/* set input and output sizes for scale calculation */
253 	in_y.width = stream->dcrop.width;
254 	in_y.height = stream->dcrop.height;
255 	out_y.width = output_fmt.width;
256 	out_y.height = output_fmt.height;
257 
258 	/* The Cb/Cr sizes depend on the pixel format */
259 	if (rkisp_mbus_code_xysubs(input_isp_fmt->mbus_code, &xsubs_in, &ysubs_in)) {
260 		v4l2_err(&dev->v4l2_dev, "No xsubs/ysubs found\n");
261 		return -EINVAL;
262 	}
263 	in_c.width = in_y.width / xsubs_in;
264 	in_c.height = in_y.height / ysubs_in;
265 
266 	if (output_isp_fmt->fmt_type == FMT_YUV) {
267 		rkisp_fcc_xysubs(output_isp_fmt->fourcc, &xsubs_out, &ysubs_out);
268 		out_c.width = out_y.width / xsubs_out;
269 		out_c.height = out_y.height / ysubs_out;
270 	} else {
271 		out_c.width = out_y.width / xsubs_in;
272 		out_c.height = out_y.height / ysubs_in;
273 	}
274 
275 	if (in_c.width == out_c.width && in_c.height == out_c.height)
276 		goto disable;
277 
278 	/* set RSZ input and output */
279 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
280 		 "stream %d rsz/scale: %dx%d -> %dx%d\n",
281 		 stream->id, stream->dcrop.width, stream->dcrop.height,
282 		 output_fmt.width, output_fmt.height);
283 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
284 		 "chroma scaling %dx%d -> %dx%d\n",
285 		 in_c.width, in_c.height, out_c.width, out_c.height);
286 
287 	/* calculate and set scale */
288 	rkisp_config_rsz(stream, &in_y, &in_c, &out_y, &out_c, async);
289 
290 	if (rkisp_debug)
291 		rkisp_dump_rsz_regs(stream);
292 
293 	return 0;
294 
295 disable:
296 	rkisp_disable_rsz(stream, async);
297 
298 	return 0;
299 }
300 
301 /***************************** stream operations *****************************/
302 
303 /*
304  * Memory base addresses must respect the AXI burst
305  * alignment restriction.
306  */
307 static u32 calc_burst_len(struct rkisp_stream *stream)
308 {
309 	struct rkisp_device *dev = stream->ispdev;
310 	u32 y_size = stream->out_fmt.plane_fmt[0].bytesperline *
311 		stream->out_fmt.height;
312 	u32 cb_size = stream->out_fmt.plane_fmt[1].sizeimage;
313 	u32 cr_size = stream->out_fmt.plane_fmt[2].sizeimage;
314 	u32 cb_offs, cr_offs;
315 	u32 bus = 16, burst;
316 	int i;
317 
318 	/* y/c base addr: burstN * bus alignment */
319 	cb_offs = y_size;
320 	cr_offs = cr_size ? (cb_size + cb_offs) : 0;
321 
322 	if (!(cb_offs % (bus * 16)) && !(cr_offs % (bus * 16)))
323 		burst = CIF_MI_CTRL_BURST_LEN_LUM_16 |
324 			CIF_MI_CTRL_BURST_LEN_CHROM_16;
325 	else if (!(cb_offs % (bus * 8)) && !(cr_offs % (bus * 8)))
326 		burst = CIF_MI_CTRL_BURST_LEN_LUM_8 |
327 			CIF_MI_CTRL_BURST_LEN_CHROM_8;
328 	else
329 		burst = CIF_MI_CTRL_BURST_LEN_LUM_4 |
330 			CIF_MI_CTRL_BURST_LEN_CHROM_4;
331 
332 	if (cb_offs % (bus * 4) || cr_offs % (bus * 4))
333 		v4l2_warn(&dev->v4l2_dev,
334 			"%dx%d fmt:0x%x not support, should be %d aligned\n",
335 			stream->out_fmt.width,
336 			stream->out_fmt.height,
337 			stream->out_fmt.pixelformat,
338 			(cr_offs == 0) ? bus * 4 : bus * 16);
339 
340 	stream->burst = burst;
341 	for (i = 0; i <= RKISP_STREAM_SP; i++)
342 		if (burst > dev->cap_dev.stream[i].burst)
343 			burst = dev->cap_dev.stream[i].burst;
344 
345 	if (stream->interlaced) {
346 		if (!(stream->out_fmt.width % (bus * 16)))
347 			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_16 |
348 				CIF_MI_CTRL_BURST_LEN_CHROM_16;
349 		else if (!(stream->out_fmt.width % (bus * 8)))
350 			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_8 |
351 				CIF_MI_CTRL_BURST_LEN_CHROM_8;
352 		else
353 			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_4 |
354 				CIF_MI_CTRL_BURST_LEN_CHROM_4;
355 		if (stream->out_fmt.width % (bus * 4))
356 			v4l2_warn(&dev->v4l2_dev,
357 				"interlaced: width should be %d aligned\n",
358 				bus * 4);
359 		burst = min(stream->burst, burst);
360 		stream->burst = burst;
361 	}
362 
363 	return burst;
364 }
365 
366 /*
367  * configure memory interface for mainpath
368  * This should only be called when the stream is started.
369  */
370 static int mp_config_mi(struct rkisp_stream *stream)
371 {
372 	struct rkisp_device *dev = stream->ispdev;
373 
374        /*
375 	* NOTE: plane_fmt[0].sizeimage is total size of all planes for single
376 	* memory plane formats, so calculate the size explicitly.
377 	*/
378 	rkisp_write(dev, stream->config->mi.y_size_init,
379 		    stream->out_fmt.plane_fmt[0].bytesperline *
380 		    stream->out_fmt.height, false);
381 	rkisp_write(dev, stream->config->mi.cb_size_init,
382 		    stream->out_fmt.plane_fmt[1].sizeimage, false);
383 	rkisp_write(dev, stream->config->mi.cr_size_init,
384 		    stream->out_fmt.plane_fmt[2].sizeimage, false);
385 
386 	rkisp_set_bits(dev, CIF_MI_XTD_FORMAT_CTRL, CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP,
387 		 stream->out_isp_fmt.uv_swap ? CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP : 0, false);
388 
389 	rkisp_set_bits(dev, CIF_MI_CTRL, GENMASK(19, 16) | MI_CTRL_MP_FMT_MASK,
390 			calc_burst_len(stream) | CIF_MI_CTRL_INIT_BASE_EN |
391 			CIF_MI_CTRL_INIT_OFFSET_EN | CIF_MI_MP_AUTOUPDATE_ENABLE |
392 			stream->out_isp_fmt.write_format, false);
393 	mi_frame_end_int_enable(stream);
394 	/* set up first buffer */
395 	mi_frame_end(stream);
396 	return 0;
397 }
398 
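/* map the ISP output media bus code to the selfpath MI input format */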
399 static int mbus_code_sp_in_fmt(u32 in_mbus_code, u32 out_fourcc, u32 *format)
400 {
401 	switch (in_mbus_code) {
402 	case MEDIA_BUS_FMT_YUYV8_2X8:
403 		*format = MI_CTRL_SP_INPUT_YUV422;
404 		break;
405 	default:
406 		return -EINVAL;
407 	}
408 
409 	/*
410 	 * Only SP supports the YCbCr 4:0:0 output format, and in
411 	 * that case the input format of SP must also be
412 	 * YCbCr 4:0:0.
413 	 * The ISP output format is YCbCr 4:2:2,
414 	 * so the CbCr data is discarded here.
415 	 */
416 	if (out_fourcc == V4L2_PIX_FMT_GREY)
417 		*format = MI_CTRL_SP_INPUT_YUV400;
418 
419 	return 0;
420 }
421 
422 /*
423  * configure memory interface for selfpath
424  * This should only be called when the stream is started.
425  */
426 static int sp_config_mi(struct rkisp_stream *stream)
427 {
428 	struct rkisp_device *dev = stream->ispdev;
429 	struct capture_fmt *output_isp_fmt = &stream->out_isp_fmt;
430 	struct ispsd_out_fmt *input_isp_fmt =
431 			rkisp_get_ispsd_out_fmt(&dev->isp_sdev);
432 	u32 sp_in_fmt, mul = 1;
433 
434 	if (mbus_code_sp_in_fmt(input_isp_fmt->mbus_code,
435 				output_isp_fmt->fourcc, &sp_in_fmt)) {
436 		v4l2_err(&dev->v4l2_dev, "Can't find the input format\n");
437 		return -EINVAL;
438 	}
439 
440        /*
441 	* NOTE: plane_fmt[0].sizeimage is total size of all planes for single
442 	* memory plane formats, so calculate the size explicitly.
443 	*/
444 	rkisp_write(dev, stream->config->mi.y_size_init,
445 		    stream->out_fmt.plane_fmt[0].bytesperline *
446 		    stream->out_fmt.height, false);
447 	rkisp_write(dev, stream->config->mi.cb_size_init,
448 		    stream->out_fmt.plane_fmt[1].sizeimage, false);
449 	rkisp_write(dev, stream->config->mi.cr_size_init,
450 		    stream->out_fmt.plane_fmt[2].sizeimage, false);
451 	rkisp_write(dev, CIF_MI_SP_Y_PIC_WIDTH, stream->out_fmt.width, false);
452 	if (stream->interlaced) {
453 		mul = 2;
454 		stream->u.sp.vir_offs = stream->out_fmt.plane_fmt[0].bytesperline;
455 	}
456 	rkisp_write(dev, CIF_MI_SP_Y_PIC_HEIGHT, stream->out_fmt.height / mul, false);
457 	rkisp_write(dev, CIF_MI_SP_Y_LLENGTH, stream->u.sp.y_stride * mul, false);
458 
459 	rkisp_set_bits(dev, CIF_MI_XTD_FORMAT_CTRL, CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP,
460 		output_isp_fmt->uv_swap ? CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP : 0, false);
461 
462 	rkisp_set_bits(dev, CIF_MI_CTRL, GENMASK(19, 16) | MI_CTRL_SP_FMT_MASK,
463 			calc_burst_len(stream) | CIF_MI_CTRL_INIT_BASE_EN |
464 			CIF_MI_CTRL_INIT_OFFSET_EN | stream->out_isp_fmt.write_format |
465 			sp_in_fmt | output_isp_fmt->output_format |
466 			CIF_MI_SP_AUTOUPDATE_ENABLE, false);
467 	mi_frame_end_int_enable(stream);
468 	/* set up first buffer */
469 	mi_frame_end(stream);
470 	return 0;
471 }
472 
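/*
 * configure memory interface for the rawwr3 (DMATX3) path;
 * only valid when the CSI sink is linked and a MIPI sensor is in use
 */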
473 static int dmatx3_config_mi(struct rkisp_stream *stream)
474 {
475 	void __iomem *base = stream->ispdev->base_addr;
476 	struct rkisp_device *dev = stream->ispdev;
477 	struct rkisp_csi_device *csi = &dev->csi_dev;
478 	u32 in_size;
479 	u8 vc;
480 
481 	if (!csi->sink[CSI_SRC_CH4 - 1].linked || stream->streaming)
482 		return -EBUSY;
483 
484 	if (!dev->active_sensor ||
485 	    (dev->active_sensor &&
486 	     dev->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)) {
487 		v4l2_err(&dev->v4l2_dev,
488 			 "only mipi sensor support rawwr3\n");
489 		return -EINVAL;
490 	}
491 	atomic_set(&stream->sequence, 0);
492 	in_size = stream->out_fmt.plane_fmt[0].sizeimage;
493 	raw_wr_set_pic_size(stream,
494 			    stream->out_fmt.width,
495 			    stream->out_fmt.height);
496 	raw_wr_set_pic_offs(stream, 0);
497 	mi_set_y_size(stream, in_size);
498 	mi_frame_end(stream);
499 	mi_frame_end_int_enable(stream);
500 	mi_wr_ctrl2(base, SW_RAW3_WR_AUTOUPD);
501 	mi_raw_length(stream);
502 	vc = csi->sink[CSI_SRC_CH4 - 1].index;
503 	raw_wr_ctrl(stream,
504 		SW_CSI_RAW_WR_CH_EN(vc) |
505 		stream->memory |
506 		SW_CSI_RAW_WR_EN_ORG);
507 	stream->u.dmatx.is_config = true;
508 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
509 		 "rawwr3 %dx%d ctrl:0x%x\n",
510 		 stream->out_fmt.width,
511 		 stream->out_fmt.height,
512 		 readl(base + CSI2RX_RAW3_WR_CTRL));
513 	return 0;
514 }
515 
516 static int dmatx2_config_mi(struct rkisp_stream *stream)
517 {
518 	void __iomem *base = stream->ispdev->base_addr;
519 	struct rkisp_device *dev = stream->ispdev;
520 	struct rkisp_csi_device *csi = &dev->csi_dev;
521 	u32 val, in_size;
522 	u8 vc;
523 
524 	if (!csi->sink[CSI_SRC_CH3 - 1].linked || stream->streaming)
525 		return -EBUSY;
526 
527 	if (!dev->active_sensor ||
528 	    (dev->active_sensor &&
529 	     dev->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)) {
530 		v4l2_err(&dev->v4l2_dev,
531 			 "only mipi sensor support rawwr2 path\n");
532 		return -EINVAL;
533 	}
534 
535 	if (!stream->u.dmatx.is_config) {
536 		stream->u.dmatx.is_config = true;
537 		atomic_set(&stream->sequence, 0);
538 		in_size = stream->out_fmt.plane_fmt[0].sizeimage;
539 		raw_wr_set_pic_size(stream,
540 				    stream->out_fmt.width,
541 				    stream->out_fmt.height);
542 		raw_wr_set_pic_offs(stream, 0);
543 		mi_set_y_size(stream, in_size);
544 		mi_frame_end(stream);
545 		mi_frame_end_int_enable(stream);
546 		mi_wr_ctrl2(base, SW_RAW1_WR_AUTOUPD);
547 		mi_raw_length(stream);
548 		vc = csi->sink[CSI_SRC_CH3 - 1].index;
549 		val = SW_CSI_RAW_WR_CH_EN(vc);
550 		val |= stream->memory;
551 		if (dev->hdr.op_mode != HDR_NORMAL)
552 			val |= SW_CSI_RAW_WR_EN_ORG;
553 		raw_wr_ctrl(stream, val);
554 	}
555 	return 0;
556 }
557 
558 static int dmatx0_config_mi(struct rkisp_stream *stream)
559 {
560 	void __iomem *base = stream->ispdev->base_addr;
561 	struct rkisp_device *dev = stream->ispdev;
562 	struct rkisp_csi_device *csi = &dev->csi_dev;
563 	u32 val, in_size;
564 	u8 vc;
565 
566 	if (!csi->sink[CSI_SRC_CH1 - 1].linked || stream->streaming)
567 		return -EBUSY;
568 
569 	if (!dev->active_sensor ||
570 	    (dev->active_sensor &&
571 	     dev->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)) {
572 		if (stream->id == RKISP_STREAM_DMATX0)
573 			v4l2_err(&dev->v4l2_dev,
574 				 "only mipi sensor support rawwr0 path\n");
575 		return -EINVAL;
576 	}
577 
578 	if (!stream->u.dmatx.is_config) {
579 		stream->u.dmatx.is_config = true;
580 		atomic_set(&stream->sequence, 0);
581 		in_size = stream->out_fmt.plane_fmt[0].sizeimage;
582 		raw_wr_set_pic_size(stream,
583 				    stream->out_fmt.width,
584 				    stream->out_fmt.height);
585 		raw_wr_set_pic_offs(stream, 0);
586 		mi_set_y_size(stream, in_size);
587 		mi_frame_end(stream);
588 		mi_frame_end_int_enable(stream);
589 		mi_wr_ctrl2(base, SW_RAW0_WR_AUTOUPD);
590 		mi_raw_length(stream);
591 		vc = csi->sink[CSI_SRC_CH1 - 1].index;
592 		val = SW_CSI_RAW_WR_CH_EN(vc);
593 		val |= stream->memory;
594 		if (dev->hdr.op_mode != HDR_NORMAL)
595 			val |= SW_CSI_RAW_WR_EN_ORG;
596 		raw_wr_ctrl(stream, val);
597 	}
598 
599 	return 0;
600 }
601 
602 static void mp_enable_mi(struct rkisp_stream *stream)
603 {
604 	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;
605 	u32 val = CIF_MI_CTRL_MP_ENABLE;
606 
607 	if (isp_fmt->fmt_type == FMT_BAYER)
608 		val = CIF_MI_CTRL_RAW_ENABLE;
609 	rkisp_set_bits(stream->ispdev, CIF_MI_CTRL,
610 			CIF_MI_CTRL_MP_ENABLE | CIF_MI_CTRL_RAW_ENABLE, val, false);
611 }
612 
613 static void sp_enable_mi(struct rkisp_stream *stream)
614 {
615 	rkisp_set_bits(stream->ispdev, CIF_MI_CTRL, 0,
616 			CIF_MI_CTRL_SP_ENABLE, false);
617 }
618 
619 static void dmatx_enable_mi(struct rkisp_stream *stream)
620 {
621 	raw_wr_enable(stream);
622 }
623 
624 static void mp_disable_mi(struct rkisp_stream *stream)
625 {
626 	rkisp_clear_bits(stream->ispdev, CIF_MI_CTRL,
627 			 CIF_MI_CTRL_MP_ENABLE | CIF_MI_CTRL_RAW_ENABLE, false);
628 }
629 
630 static void sp_disable_mi(struct rkisp_stream *stream)
631 {
632 	rkisp_clear_bits(stream->ispdev, CIF_MI_CTRL, CIF_MI_CTRL_SP_ENABLE, false);
633 }
634 
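/*
 * Update the raw writer output address at frame end: use the queued buffer
 * if one is available, otherwise recycle a buffer from the internal HDR
 * queues, and finally fall back to the shared dummy buffer (frame loss).
 */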
635 static void update_dmatx_v2(struct rkisp_stream *stream)
636 {
637 	struct rkisp_device *dev = stream->ispdev;
638 	struct rkisp_dummy_buffer *buf = NULL;
639 	u8 index;
640 
641 	if (stream->next_buf) {
642 		mi_set_y_addr(stream, stream->next_buf->buff_addr[RKISP_PLANE_Y]);
643 	} else {
644 		if (stream->id == RKISP_STREAM_DMATX0)
645 			index = dev->hdr.index[HDR_DMA0];
646 		else if (stream->id == RKISP_STREAM_DMATX2)
647 			index = dev->hdr.index[HDR_DMA2];
648 
649 		if ((stream->id == RKISP_STREAM_DMATX0 ||
650 		     stream->id == RKISP_STREAM_DMATX2)) {
651 			buf = hdr_dqbuf(&dev->hdr.q_tx[index]);
652 			if (IS_HDR_RDBK(dev->hdr.op_mode) &&
653 			    !dev->dmarx_dev.trigger)
654 				hdr_qbuf(&dev->hdr.q_rx[index], buf);
655 			else
656 				hdr_qbuf(&dev->hdr.q_tx[index], buf);
657 		}
658 		if (!buf && dev->hw_dev->dummy_buf.mem_priv) {
659 			buf = &dev->hw_dev->dummy_buf;
660 			stream->dbg.frameloss++;
661 		}
662 		if (buf)
663 			mi_set_y_addr(stream, buf->dma_addr);
664 	}
665 	v4l2_dbg(2, rkisp_debug, &dev->v4l2_dev,
666 		 "%s stream:%d Y:0x%x SHD:0x%x\n",
667 		 __func__, stream->id,
668 		 rkisp_read(dev, stream->config->mi.y_base_ad_init, true),
669 		 rkisp_read(dev, stream->config->mi.y_base_ad_shd, true));
670 }
671 
672 /* Update buffer info in the memory interface; called from the frame-end interrupt */
673 static void update_mi(struct rkisp_stream *stream)
674 {
675 	struct rkisp_dummy_buffer *dummy_buf = &stream->ispdev->hw_dev->dummy_buf;
676 	struct rkisp_device *dev = stream->ispdev;
677 
678 	/* The dummy buffer allocated by dma_alloc_coherent is used, so
679 	 * data can be thrown away when no buffer is available.
680 	 */
681 	if (stream->next_buf) {
682 		rkisp_write(dev, stream->config->mi.y_base_ad_init,
683 			    stream->next_buf->buff_addr[RKISP_PLANE_Y], false);
684 		rkisp_write(dev, stream->config->mi.cb_base_ad_init,
685 			    stream->next_buf->buff_addr[RKISP_PLANE_CB], false);
686 		rkisp_write(dev, stream->config->mi.cr_base_ad_init,
687 			    stream->next_buf->buff_addr[RKISP_PLANE_CR], false);
688 		/* for multi-device, the mp/sp single buffer is updated at read-back */
689 		if (!dev->hw_dev->is_single) {
690 			stream->curr_buf = stream->next_buf;
691 			stream->next_buf = NULL;
692 		}
693 	} else if (dummy_buf->mem_priv) {
694 		rkisp_write(dev, stream->config->mi.y_base_ad_init,
695 			    dummy_buf->dma_addr, false);
696 		rkisp_write(dev, stream->config->mi.cb_base_ad_init,
697 			    dummy_buf->dma_addr, false);
698 		rkisp_write(dev, stream->config->mi.cr_base_ad_init,
699 			    dummy_buf->dma_addr, false);
700 		stream->dbg.frameloss++;
701 	}
702 
703 	mi_set_y_offset(stream, 0);
704 	mi_set_cb_offset(stream, 0);
705 	mi_set_cr_offset(stream, 0);
706 	v4l2_dbg(2, rkisp_debug, &stream->ispdev->v4l2_dev,
707 		 "%s stream:%d Y:0x%x CB:0x%x CR:0x%x | Y_SHD:0x%x\n",
708 		 __func__, stream->id,
709 		 rkisp_read(dev, stream->config->mi.y_base_ad_init, false),
710 		 rkisp_read(dev, stream->config->mi.cb_base_ad_init, false),
711 		 rkisp_read(dev, stream->config->mi.cr_base_ad_init, false),
712 		 rkisp_read(dev, stream->config->mi.y_base_ad_shd, true));
713 }
714 
715 static void mp_stop_mi(struct rkisp_stream *stream)
716 {
717 	stream->ops->disable_mi(stream);
718 }
719 
720 static void sp_stop_mi(struct rkisp_stream *stream)
721 {
722 	stream->ops->disable_mi(stream);
723 }
724 
725 static void dmatx_stop_mi(struct rkisp_stream *stream)
726 {
727 	raw_wr_disable(stream);
728 	stream->u.dmatx.is_config = false;
729 }
730 
731 static struct streams_ops rkisp_mp_streams_ops = {
732 	.config_mi = mp_config_mi,
733 	.enable_mi = mp_enable_mi,
734 	.disable_mi = mp_disable_mi,
735 	.stop_mi = mp_stop_mi,
736 	.set_data_path = stream_data_path,
737 	.is_stream_stopped = mp_is_stream_stopped,
738 	.update_mi = update_mi,
739 	.frame_end = mi_frame_end,
740 };
741 
742 static struct streams_ops rkisp_sp_streams_ops = {
743 	.config_mi = sp_config_mi,
744 	.enable_mi = sp_enable_mi,
745 	.disable_mi = sp_disable_mi,
746 	.stop_mi = sp_stop_mi,
747 	.set_data_path = stream_data_path,
748 	.is_stream_stopped = sp_is_stream_stopped,
749 	.update_mi = update_mi,
750 	.frame_end = mi_frame_end,
751 };
752 
753 static struct streams_ops rkisp2_dmatx0_streams_ops = {
754 	.config_mi = dmatx0_config_mi,
755 	.enable_mi = dmatx_enable_mi,
756 	.stop_mi = dmatx_stop_mi,
757 	.is_stream_stopped = dmatx0_is_stream_stopped,
758 	.update_mi = update_dmatx_v2,
759 	.frame_end = mi_frame_end,
760 };
761 
762 static struct streams_ops rkisp2_dmatx2_streams_ops = {
763 	.config_mi = dmatx2_config_mi,
764 	.enable_mi = dmatx_enable_mi,
765 	.stop_mi = dmatx_stop_mi,
766 	.is_stream_stopped = dmatx2_is_stream_stopped,
767 	.update_mi = update_dmatx_v2,
768 	.frame_end = mi_frame_end,
769 };
770 
771 static struct streams_ops rkisp2_dmatx3_streams_ops = {
772 	.config_mi = dmatx3_config_mi,
773 	.enable_mi = dmatx_enable_mi,
774 	.stop_mi = dmatx_stop_mi,
775 	.is_stream_stopped = dmatx3_is_stream_stopped,
776 	.update_mi = update_dmatx_v2,
777 	.frame_end = mi_frame_end,
778 };
779 
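/*
 * For HDR read-back, match the long (DMATX0) and short (DMATX2) frames by
 * timestamp and return both buffers to userspace; on a mismatch, requeue
 * the buffers and count a frame loss.
 */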
780 static void rdbk_frame_end(struct rkisp_stream *stream)
781 {
782 	struct rkisp_device *isp_dev = stream->ispdev;
783 	struct rkisp_capture_device *cap = &isp_dev->cap_dev;
784 	struct rkisp_sensor_info *sensor = isp_dev->active_sensor;
785 	u32 denominator = sensor->fi.interval.denominator;
786 	u32 numerator = sensor->fi.interval.numerator;
787 	u64 l_ts, s_ts;
788 	int ret, fps = -1, time = 30000000;
789 
790 	if (stream->id != RKISP_STREAM_DMATX2)
791 		return;
792 
793 	if (isp_dev->hdr.op_mode == HDR_RDBK_FRAME1) {
794 		vb2_buffer_done(&cap->rdbk_buf[RDBK_S]->vb.vb2_buf, VB2_BUF_STATE_DONE);
795 		cap->rdbk_buf[RDBK_S] = NULL;
796 		return;
797 	}
798 
799 	if (denominator && numerator)
800 		time = numerator * 1000 / denominator * 1000 * 1000;
801 
802 	if (cap->rdbk_buf[RDBK_L] && cap->rdbk_buf[RDBK_S]) {
803 		l_ts = cap->rdbk_buf[RDBK_L]->vb.vb2_buf.timestamp;
804 		s_ts = cap->rdbk_buf[RDBK_S]->vb.vb2_buf.timestamp;
805 
806 		if ((s_ts - l_ts) > time) {
807 			ret = v4l2_subdev_call(sensor->sd,
808 				video, g_frame_interval, &sensor->fi);
809 			if (!ret) {
810 				denominator = sensor->fi.interval.denominator;
811 				numerator = sensor->fi.interval.numerator;
812 				time = numerator * 1000 / denominator * 1000 * 1000;
813 				if (numerator)
814 					fps = denominator / numerator;
815 			}
816 			if ((s_ts - l_ts) > time) {
817 				v4l2_err(&isp_dev->v4l2_dev,
818 					 "timestamp no match, s:%lld l:%lld, fps:%d\n",
819 					 s_ts, l_ts, fps);
820 				goto RDBK_FRM_UNMATCH;
821 			}
822 		}
823 
824 		if (s_ts < l_ts) {
825 			v4l2_err(&isp_dev->v4l2_dev,
826 				 "s/l frame err, timestamp s:%lld l:%lld\n",
827 				 s_ts, l_ts);
828 			goto RDBK_FRM_UNMATCH;
829 		}
830 
831 		cap->rdbk_buf[RDBK_S]->vb.sequence =
832 			cap->rdbk_buf[RDBK_L]->vb.sequence;
833 		vb2_buffer_done(&cap->rdbk_buf[RDBK_L]->vb.vb2_buf,
834 			VB2_BUF_STATE_DONE);
835 		vb2_buffer_done(&cap->rdbk_buf[RDBK_S]->vb.vb2_buf,
836 			VB2_BUF_STATE_DONE);
837 	} else {
838 		v4l2_err(&isp_dev->v4l2_dev, "lost long frames\n");
839 		goto RDBK_FRM_UNMATCH;
840 	}
841 
842 	cap->rdbk_buf[RDBK_L] = NULL;
843 	cap->rdbk_buf[RDBK_S] = NULL;
844 	return;
845 
846 RDBK_FRM_UNMATCH:
847 	stream->dbg.frameloss++;
848 	if (cap->rdbk_buf[RDBK_L])
849 		rkisp_buf_queue(&cap->rdbk_buf[RDBK_L]->vb.vb2_buf);
850 	if (cap->rdbk_buf[RDBK_S])
851 		rkisp_buf_queue(&cap->rdbk_buf[RDBK_S]->vb.vb2_buf);
852 
853 	cap->rdbk_buf[RDBK_L] = NULL;
854 	cap->rdbk_buf[RDBK_S] = NULL;
855 }
856 
857 /*
858  * This function is called when a frame-end interrupt arrives. The next
859  * frame is already being processed, so we must set up the buffer for the
860  * frame after next, otherwise the memory interface will overflow.
861  */
862 static int mi_frame_end(struct rkisp_stream *stream)
863 {
864 	struct rkisp_device *dev = stream->ispdev;
865 	struct rkisp_capture_device *cap = &dev->cap_dev;
866 	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;
867 	bool interlaced = stream->interlaced;
868 	unsigned long lock_flags = 0;
869 	int i = 0;
870 
871 	if (!stream->next_buf && stream->streaming &&
872 	    dev->dmarx_dev.trigger == T_MANUAL &&
873 	    is_rdbk_stream(stream))
874 		v4l2_info(&dev->v4l2_dev,
875 			  "tx stream:%d lose frame:%d, isp state:0x%x frame:%d\n",
876 			  stream->id, atomic_read(&stream->sequence) - 1,
877 			  dev->isp_state, dev->dmarx_dev.cur_frame.id);
878 
879 	if (stream->curr_buf &&
880 	    (!interlaced ||
881 	     (stream->u.sp.field_rec == RKISP_FIELD_ODD &&
882 	      stream->u.sp.field == RKISP_FIELD_EVEN))) {
883 		struct vb2_buffer *vb2_buf = &stream->curr_buf->vb.vb2_buf;
884 		u64 ns = 0;
885 
886 		/* Dequeue a filled buffer */
887 		for (i = 0; i < isp_fmt->mplanes; i++) {
888 			u32 payload_size =
889 				stream->out_fmt.plane_fmt[i].sizeimage;
890 			vb2_set_plane_payload(vb2_buf, i, payload_size);
891 		}
892 		if (stream->id == RKISP_STREAM_MP ||
893 		    stream->id == RKISP_STREAM_SP) {
894 			rkisp_dmarx_get_frame(dev, &i, NULL, &ns, true);
895 			atomic_set(&stream->sequence, i);
896 			stream->curr_buf->vb.sequence = i;
897 		} else {
898 			stream->curr_buf->vb.sequence =
899 				atomic_read(&stream->sequence) - 1;
900 		}
901 		if (!ns)
902 			ns = ktime_get_ns();
903 		vb2_buf->timestamp = ns;
904 
905 		ns = ktime_get_ns();
906 		stream->dbg.interval = ns - stream->dbg.timestamp;
907 		stream->dbg.timestamp = ns;
908 		stream->dbg.id = stream->curr_buf->vb.sequence;
909 		if (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP)
910 			stream->dbg.delay = ns - dev->isp_sdev.frm_timestamp;
911 
912 		if (!stream->streaming) {
913 			vb2_buffer_done(vb2_buf, VB2_BUF_STATE_ERROR);
914 		} else if (is_rdbk_stream(stream) &&
915 			   dev->dmarx_dev.trigger == T_MANUAL) {
916 			if (stream->id == RKISP_STREAM_DMATX0) {
917 				if (cap->rdbk_buf[RDBK_L]) {
918 					v4l2_err(&dev->v4l2_dev,
919 						 "multiple long data in hdr frame\n");
920 					rkisp_buf_queue(&cap->rdbk_buf[RDBK_L]->vb.vb2_buf);
921 				}
922 				cap->rdbk_buf[RDBK_L] = stream->curr_buf;
923 			} else {
924 				if (cap->rdbk_buf[RDBK_S]) {
925 					v4l2_err(&dev->v4l2_dev,
926 						 "multiple short data in hdr frame\n");
927 					rkisp_buf_queue(&cap->rdbk_buf[RDBK_S]->vb.vb2_buf);
928 				}
929 				cap->rdbk_buf[RDBK_S] = stream->curr_buf;
930 				rdbk_frame_end(stream);
931 			}
932 		} else {
933 			vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
934 		}
935 
936 		stream->curr_buf = NULL;
937 	}
938 
939 	if (!interlaced ||
940 		(stream->curr_buf == stream->next_buf &&
941 		stream->u.sp.field == RKISP_FIELD_ODD)) {
942 		/* The next frame is being written to this buffer.
943 		 * Interlaced: the odd field uses the next buffer address.
944 		 */
945 		stream->curr_buf = stream->next_buf;
946 		stream->next_buf = NULL;
947 
948 		/* Set up an empty buffer for the frame after next */
949 		spin_lock_irqsave(&stream->vbq_lock, lock_flags);
950 		if (!list_empty(&stream->buf_queue)) {
951 			stream->next_buf =
952 				list_first_entry(&stream->buf_queue,
953 						 struct rkisp_buffer,
954 						 queue);
955 			list_del(&stream->next_buf->queue);
956 		}
957 		spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
958 	} else if (stream->u.sp.field_rec == RKISP_FIELD_ODD &&
959 		stream->u.sp.field == RKISP_FIELD_EVEN) {
960 		/* Interlaced: the even field uses the next buffer address */
961 		if (stream->next_buf) {
962 			stream->next_buf->buff_addr[RKISP_PLANE_Y] +=
963 				stream->u.sp.vir_offs;
964 			stream->next_buf->buff_addr[RKISP_PLANE_CB] +=
965 				stream->u.sp.vir_offs;
966 			stream->next_buf->buff_addr[RKISP_PLANE_CR] +=
967 				stream->u.sp.vir_offs;
968 		}
969 		stream->curr_buf = stream->next_buf;
970 	}
971 
972 	stream->ops->update_mi(stream);
973 
974 	if (interlaced)
975 		stream->u.sp.field_rec = stream->u.sp.field;
976 
977 	return 0;
978 }
979 
980 /***************************** vb2 operations ********************************/
981 
982 /*
983  * Set the stop flag and wait; the stream should stop from the
984  * interrupt handler. If it doesn't, stop it by force.
985  */
986 static void rkisp_stream_stop(struct rkisp_stream *stream)
987 {
988 	struct rkisp_device *dev = stream->ispdev;
989 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
990 	int ret = 0;
991 
992 	if (!dev->dmarx_dev.trigger &&
993 	    (is_rdbk_stream(stream) || is_hdr_stream(stream))) {
994 		stream->streaming = false;
995 		return;
996 	}
997 
998 	stream->stopping = true;
999 	if ((!dev->hw_dev->is_single && stream->id != RKISP_STREAM_MP &&
1000 	     stream->id != RKISP_STREAM_SP) || dev->hw_dev->is_single)
1001 		stream->ops->stop_mi(stream);
1002 
1003 	if (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP)
1004 		hdr_stop_dmatx(dev);
1005 
1006 	if (dev->isp_state & ISP_START &&
1007 	    !stream->ops->is_stream_stopped(dev->base_addr)) {
1008 		ret = wait_event_timeout(stream->done,
1009 					 !stream->streaming,
1010 					 msecs_to_jiffies(500));
1011 		if (!ret)
1012 			v4l2_warn(v4l2_dev, "%s id:%d timeout\n",
1013 				  __func__, stream->id);
1014 	}
1015 
1016 	stream->stopping = false;
1017 	stream->streaming = false;
1018 	if (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP) {
1019 		stream->ops->disable_mi(stream);
1020 		rkisp_disable_dcrop(stream, true);
1021 		rkisp_disable_rsz(stream, true);
1022 		ret = (stream->id == RKISP_STREAM_MP) ?
1023 			ISP_FRAME_MP : ISP_FRAME_SP;
1024 		dev->irq_ends_mask &= ~ret;
1025 	}
1026 
1027 	stream->burst =
1028 		CIF_MI_CTRL_BURST_LEN_LUM_16 |
1029 		CIF_MI_CTRL_BURST_LEN_CHROM_16;
1030 	stream->interlaced = false;
1031 }
1032 
1033 /*
1034  * Most registers inside the rockchip isp1 have a shadow register, since
1035  * they must not be changed while a frame is being processed.
1036  * Usually, each sub-module updates its shadow register after
1037  * processing the last pixel of a frame.
1038  */
1039 static int rkisp_start(struct rkisp_stream *stream)
1040 {
1041 	struct rkisp_device *dev = stream->ispdev;
1042 	bool is_update = false;
1043 	int ret;
1044 
1045 	if (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP) {
1046 		is_update = (stream->id == RKISP_STREAM_MP) ?
1047 			!dev->cap_dev.stream[RKISP_STREAM_SP].streaming :
1048 			!dev->cap_dev.stream[RKISP_STREAM_MP].streaming;
1049 	}
1050 
1051 	if (stream->ops->set_data_path)
1052 		stream->ops->set_data_path(stream);
1053 	ret = stream->ops->config_mi(stream);
1054 	if (ret)
1055 		return ret;
1056 
1057 	stream->ops->enable_mi(stream);
1058 	if (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP)
1059 		hdr_config_dmatx(dev);
1060 	if (is_update)
1061 		dev->irq_ends_mask |=
1062 			(stream->id == RKISP_STREAM_MP) ? ISP_FRAME_MP : ISP_FRAME_SP;
1063 	stream->streaming = true;
1064 
1065 	return 0;
1066 }
1067 
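/* vb2 queue_setup: report the plane count and per-plane sizes for the stream */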
1068 static int rkisp_queue_setup(struct vb2_queue *queue,
1069 			      unsigned int *num_buffers,
1070 			      unsigned int *num_planes,
1071 			      unsigned int sizes[],
1072 			      struct device *alloc_ctxs[])
1073 {
1074 	struct rkisp_stream *stream = queue->drv_priv;
1075 	struct rkisp_device *dev = stream->ispdev;
1076 	const struct v4l2_pix_format_mplane *pixm = NULL;
1077 	const struct capture_fmt *isp_fmt = NULL;
1078 	u32 i;
1079 
1080 	pixm = &stream->out_fmt;
1081 	isp_fmt = &stream->out_isp_fmt;
1082 	*num_planes = isp_fmt->mplanes;
1083 
1084 	for (i = 0; i < isp_fmt->mplanes; i++) {
1085 		const struct v4l2_plane_pix_format *plane_fmt;
1086 
1087 		plane_fmt = &pixm->plane_fmt[i];
1088 		/* Align the height to 16 when allocating memory so that
1089 		 * the Rockchip encoder can use the DMA buffer directly
1090 		 */
1091 		sizes[i] = (isp_fmt->fmt_type == FMT_YUV) ?
1092 			plane_fmt->sizeimage / pixm->height *
1093 			ALIGN(pixm->height, 16) :
1094 			plane_fmt->sizeimage;
1095 	}
1096 
1097 	rkisp_chk_tb_over(dev);
1098 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev, "%s count %d, size %d\n",
1099 		 v4l2_type_names[queue->type], *num_buffers, sizes[0]);
1100 
1101 	return 0;
1102 }
1103 
1104 /*
1105  * The vb2_buffer is stored in rkisp_buffer in order to unify
1106  * mplane and non-mplane buffers.
1107  */
1108 static void rkisp_buf_queue(struct vb2_buffer *vb)
1109 {
1110 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1111 	struct rkisp_buffer *ispbuf = to_rkisp_buffer(vbuf);
1112 	struct vb2_queue *queue = vb->vb2_queue;
1113 	struct rkisp_stream *stream = queue->drv_priv;
1114 	unsigned long lock_flags = 0;
1115 	struct v4l2_pix_format_mplane *pixm = &stream->out_fmt;
1116 	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;
1117 	struct sg_table *sgt;
1118 	int i;
1119 
1120 	memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
1121 	for (i = 0; i < isp_fmt->mplanes; i++) {
1122 		vb2_plane_vaddr(vb, i);
1123 		if (stream->ispdev->hw_dev->is_dma_sg_ops) {
1124 			sgt = vb2_dma_sg_plane_desc(vb, i);
1125 			ispbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
1126 		} else {
1127 			ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
1128 		}
1129 	}
1130 	/*
1131 	 * NOTE: plane_fmt[0].sizeimage is total size of all planes for single
1132 	 * memory plane formats, so calculate the size explicitly.
1133 	 */
1134 	if (isp_fmt->mplanes == 1) {
1135 		for (i = 0; i < isp_fmt->cplanes - 1; i++) {
1136 			ispbuf->buff_addr[i + 1] = (i == 0) ?
1137 				ispbuf->buff_addr[i] +
1138 				pixm->plane_fmt[i].bytesperline *
1139 				pixm->height :
1140 				ispbuf->buff_addr[i] +
1141 				pixm->plane_fmt[i].sizeimage;
1142 		}
1143 	}
1144 
1145 	v4l2_dbg(2, rkisp_debug, &stream->ispdev->v4l2_dev,
1146 		 "stream:%d queue buf:0x%x\n",
1147 		 stream->id, ispbuf->buff_addr[0]);
1148 
1149 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
1150 	list_add_tail(&ispbuf->queue, &stream->buf_queue);
1151 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
1152 }
1153 
1154 static int rkisp_create_dummy_buf(struct rkisp_stream *stream)
1155 {
1156 	return rkisp_alloc_common_dummy_buf(stream->ispdev);
1157 }
1158 
1159 static void rkisp_destroy_dummy_buf(struct rkisp_stream *stream)
1160 {
1161 	struct rkisp_device *dev = stream->ispdev;
1162 
1163 	hdr_destroy_buf(dev);
1164 	rkisp_free_common_dummy_buf(dev);
1165 }
1166 
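/*
 * Return every pending and in-flight buffer, including the HDR read-back
 * buffers, to vb2 with the given state.
 */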
1167 static void destroy_buf_queue(struct rkisp_stream *stream,
1168 			      enum vb2_buffer_state state)
1169 {
1170 	struct rkisp_device *isp_dev = stream->ispdev;
1171 	struct rkisp_capture_device *cap = &isp_dev->cap_dev;
1172 	unsigned long lock_flags = 0;
1173 	struct rkisp_buffer *buf;
1174 
1175 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
1176 	if (cap->rdbk_buf[RDBK_L] && stream->id == RKISP_STREAM_DMATX0) {
1177 		list_add_tail(&cap->rdbk_buf[RDBK_L]->queue,
1178 			&stream->buf_queue);
1179 		if (cap->rdbk_buf[RDBK_L] == stream->curr_buf)
1180 			stream->curr_buf = NULL;
1181 		if (cap->rdbk_buf[RDBK_L] == stream->next_buf)
1182 			stream->next_buf = NULL;
1183 		cap->rdbk_buf[RDBK_L] = NULL;
1184 	}
1185 	if (cap->rdbk_buf[RDBK_S] && stream->id == RKISP_STREAM_DMATX2) {
1186 		list_add_tail(&cap->rdbk_buf[RDBK_S]->queue,
1187 			&stream->buf_queue);
1188 		if (cap->rdbk_buf[RDBK_S] == stream->curr_buf)
1189 			stream->curr_buf = NULL;
1190 		if (cap->rdbk_buf[RDBK_S] == stream->next_buf)
1191 			stream->next_buf = NULL;
1192 		cap->rdbk_buf[RDBK_S] = NULL;
1193 	}
1194 	if (stream->curr_buf) {
1195 		list_add_tail(&stream->curr_buf->queue, &stream->buf_queue);
1196 		if (stream->curr_buf == stream->next_buf)
1197 			stream->next_buf = NULL;
1198 		stream->curr_buf = NULL;
1199 	}
1200 	if (stream->next_buf) {
1201 		list_add_tail(&stream->next_buf->queue, &stream->buf_queue);
1202 		stream->next_buf = NULL;
1203 	}
1204 	while (!list_empty(&stream->buf_queue)) {
1205 		buf = list_first_entry(&stream->buf_queue,
1206 			struct rkisp_buffer, queue);
1207 		list_del(&buf->queue);
1208 		vb2_buffer_done(&buf->vb.vb2_buf, state);
1209 	}
1210 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
1211 }
1212 
1213 static void rkisp_stop_streaming(struct vb2_queue *queue)
1214 {
1215 	struct rkisp_stream *stream = queue->drv_priv;
1216 	struct rkisp_vdev_node *node = &stream->vnode;
1217 	struct rkisp_device *dev = stream->ispdev;
1218 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1219 	int ret;
1220 
1221 	mutex_lock(&dev->hw_dev->dev_lock);
1222 
1223 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
1224 		 "%s %d\n", __func__, stream->id);
1225 
1226 	if (!stream->streaming)
1227 		goto end;
1228 
1229 	rkisp_stream_stop(stream);
1230 	if (stream->id == RKISP_STREAM_MP ||
1231 	    stream->id == RKISP_STREAM_SP) {
1232 		/* stop the other devices in the pipeline */
1233 		media_pipeline_stop(&node->vdev.entity);
1234 		ret = dev->pipe.set_stream(&dev->pipe, false);
1235 		if (ret < 0)
1236 			v4l2_err(v4l2_dev,
1237 				 "pipeline stream-off failed:%d\n", ret);
1238 	}
1239 
1240 	/* release buffers */
1241 	destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);
1242 
1243 	ret = dev->pipe.close(&dev->pipe);
1244 	if (ret < 0)
1245 		v4l2_err(v4l2_dev, "pipeline close failed error:%d\n", ret);
1246 	rkisp_destroy_dummy_buf(stream);
1247 	atomic_dec(&dev->cap_dev.refcnt);
1248 
1249 end:
1250 	mutex_unlock(&dev->hw_dev->dev_lock);
1251 }
1252 
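/* configure resize/crop (MP/SP only), then start the memory interface */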
1253 static int rkisp_stream_start(struct rkisp_stream *stream)
1254 {
1255 	struct v4l2_device *v4l2_dev = &stream->ispdev->v4l2_dev;
1256 	struct rkisp_device *dev = stream->ispdev;
1257 	struct rkisp_stream *other = &dev->cap_dev.stream[stream->id ^ 1];
1258 	bool async = false;
1259 	int ret;
1260 
1261 	/* the DMATX streams have no rsz or dcrop units */
1262 	if (stream->id == RKISP_STREAM_DMATX0 ||
1263 	    stream->id == RKISP_STREAM_DMATX1 ||
1264 	    stream->id == RKISP_STREAM_DMATX2 ||
1265 	    stream->id == RKISP_STREAM_DMATX3)
1266 		goto end;
1267 
1268 	if (other->streaming)
1269 		async = true;
1270 
1271 	ret = rkisp_stream_config_rsz(stream, async);
1272 	if (ret < 0) {
1273 		v4l2_err(v4l2_dev, "config rsz failed with error %d\n", ret);
1274 		return ret;
1275 	}
1276 
1277 	/*
1278 	 * can't be async now, otherwise the later-started stream fails to
1279 	 * produce an mi interrupt.
1280 	 */
1281 	ret = rkisp_stream_config_dcrop(stream, false);
1282 	if (ret < 0) {
1283 		v4l2_err(v4l2_dev, "config dcrop failed with error %d\n", ret);
1284 		return ret;
1285 	}
1286 
1287 end:
1288 	return rkisp_start(stream);
1289 }
1290 
1291 static int
1292 rkisp_start_streaming(struct vb2_queue *queue, unsigned int count)
1293 {
1294 	struct rkisp_stream *stream = queue->drv_priv;
1295 	struct rkisp_vdev_node *node = &stream->vnode;
1296 	struct rkisp_device *dev = stream->ispdev;
1297 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1298 	int ret = -1;
1299 
1300 	mutex_lock(&dev->hw_dev->dev_lock);
1301 
1302 	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
1303 		 "%s %d\n", __func__, stream->id);
1304 
1305 	if (WARN_ON(stream->streaming)) {
1306 		mutex_unlock(&dev->hw_dev->dev_lock);
1307 		return -EBUSY;
1308 	}
1309 
1310 	memset(&stream->dbg, 0, sizeof(stream->dbg));
1311 	atomic_inc(&dev->cap_dev.refcnt);
1312 	if (!dev->isp_inp || !stream->linked) {
1313 		v4l2_err(v4l2_dev, "check video link or isp input\n");
1314 		goto buffer_done;
1315 	}
1316 
1317 	if (atomic_read(&dev->cap_dev.refcnt) == 1 &&
1318 	    (dev->isp_inp & INP_CSI || dev->isp_inp & INP_DVP)) {
1319 		/* update sensor info when first streaming */
1320 		ret = rkisp_update_sensor_info(dev);
1321 		if (ret < 0) {
1322 			v4l2_err(v4l2_dev,
1323 				 "update sensor info failed %d\n",
1324 				 ret);
1325 			goto buffer_done;
1326 		}
1327 	}
1328 
1329 	if (dev->active_sensor &&
1330 	    dev->active_sensor->fmt[0].format.field == V4L2_FIELD_INTERLACED) {
1331 		if (stream->id != RKISP_STREAM_SP) {
1332 			v4l2_err(v4l2_dev,
1333 				"only selfpath support interlaced\n");
1334 			ret = -EINVAL;
1335 			goto buffer_done;
1336 		}
1337 		stream->interlaced = true;
1338 		stream->u.sp.field = RKISP_FIELD_INVAL;
1339 		stream->u.sp.field_rec = RKISP_FIELD_INVAL;
1340 	}
1341 
1342 	ret = rkisp_create_dummy_buf(stream);
1343 	if (ret < 0)
1344 		goto buffer_done;
1345 
1346 	/* enable clocks/power-domains */
1347 	ret = dev->pipe.open(&dev->pipe, &node->vdev.entity, true);
1348 	if (ret < 0) {
1349 		v4l2_err(v4l2_dev, "open cif pipeline failed %d\n", ret);
1350 		goto destroy_dummy_buf;
1351 	}
1352 
1353 	/* configure stream hardware to start */
1354 	ret = rkisp_stream_start(stream);
1355 	if (ret < 0) {
1356 		v4l2_err(v4l2_dev, "start streaming failed\n");
1357 		goto close_pipe;
1358 	}
1359 
1360 	if (stream->id == RKISP_STREAM_MP ||
1361 	    stream->id == RKISP_STREAM_SP) {
1362 		/* start sub-devices */
1363 		ret = dev->pipe.set_stream(&dev->pipe, true);
1364 		if (ret < 0)
1365 			goto stop_stream;
1366 
1367 		ret = media_pipeline_start(&node->vdev.entity, &dev->pipe.pipe);
1368 		if (ret < 0) {
1369 			v4l2_err(&dev->v4l2_dev,
1370 				 "start pipeline failed %d\n", ret);
1371 			goto pipe_stream_off;
1372 		}
1373 	}
1374 
1375 	mutex_unlock(&dev->hw_dev->dev_lock);
1376 	return 0;
1377 
1378 pipe_stream_off:
1379 	dev->pipe.set_stream(&dev->pipe, false);
1380 stop_stream:
1381 	rkisp_stream_stop(stream);
1382 close_pipe:
1383 	dev->pipe.close(&dev->pipe);
1384 destroy_dummy_buf:
1385 	rkisp_destroy_dummy_buf(stream);
1386 buffer_done:
1387 	destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
1388 	atomic_dec(&dev->cap_dev.refcnt);
1389 	stream->streaming = false;
1390 	mutex_unlock(&dev->hw_dev->dev_lock);
1391 	return ret;
1392 }
1393 
1394 static struct vb2_ops rkisp_vb2_ops = {
1395 	.queue_setup = rkisp_queue_setup,
1396 	.buf_queue = rkisp_buf_queue,
1397 	.wait_prepare = vb2_ops_wait_prepare,
1398 	.wait_finish = vb2_ops_wait_finish,
1399 	.stop_streaming = rkisp_stop_streaming,
1400 	.start_streaming = rkisp_start_streaming,
1401 };
1402 
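/* initialize the common vb2 queue settings for a capture video node */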
1403 static int rkisp_init_vb2_queue(struct vb2_queue *q,
1404 				struct rkisp_stream *stream,
1405 				enum v4l2_buf_type buf_type)
1406 {
1407 	q->type = buf_type;
1408 	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1409 	q->drv_priv = stream;
1410 	q->ops = &rkisp_vb2_ops;
1411 	q->mem_ops = stream->ispdev->hw_dev->mem_ops;
1412 	q->buf_struct_size = sizeof(struct rkisp_buffer);
1413 	q->min_buffers_needed = CIF_ISP_REQ_BUFS_MIN;
1414 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1415 	q->lock = &stream->apilock;
1416 	q->dev = stream->ispdev->hw_dev->dev;
1417 	q->allow_cache_hints = 1;
1418 	q->bidirectional = 1;
1419 	if (stream->ispdev->hw_dev->is_dma_contig)
1420 		q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
1421 	q->gfp_flags = GFP_DMA32;
1422 	return vb2_queue_init(q);
1423 }
1424 
1425 static int rkisp_stream_init(struct rkisp_device *dev, u32 id)
1426 {
1427 	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
1428 	struct rkisp_stream *stream;
1429 	struct video_device *vdev;
1430 	struct rkisp_vdev_node *node;
1431 	int ret = 0;
1432 
1433 	stream = &cap_dev->stream[id];
1434 	stream->id = id;
1435 	stream->ispdev = dev;
1436 	vdev = &stream->vnode.vdev;
1437 
1438 	INIT_LIST_HEAD(&stream->buf_queue);
1439 	init_waitqueue_head(&stream->done);
1440 	spin_lock_init(&stream->vbq_lock);
1441 	stream->linked = true;
1442 
1443 	switch (id) {
1444 	case RKISP_STREAM_SP:
1445 		strlcpy(vdev->name, SP_VDEV_NAME,
1446 			sizeof(vdev->name));
1447 		stream->ops = &rkisp_sp_streams_ops;
1448 		stream->config = &rkisp_sp_stream_config;
1449 		break;
1450 	case RKISP_STREAM_DMATX0:
1451 		strlcpy(vdev->name, DMATX0_VDEV_NAME,
1452 			sizeof(vdev->name));
1453 		stream->ops = &rkisp2_dmatx0_streams_ops;
1454 		stream->config = &rkisp2_dmatx0_stream_config;
1455 		break;
1456 	case RKISP_STREAM_DMATX2:
1457 		strlcpy(vdev->name, DMATX2_VDEV_NAME,
1458 			sizeof(vdev->name));
1459 		stream->ops = &rkisp2_dmatx2_streams_ops;
1460 		stream->config = &rkisp2_dmatx1_stream_config;
1461 		break;
1462 	case RKISP_STREAM_DMATX3:
1463 		strlcpy(vdev->name, DMATX3_VDEV_NAME,
1464 			sizeof(vdev->name));
1465 		stream->ops = &rkisp2_dmatx3_streams_ops;
1466 		stream->config = &rkisp2_dmatx3_stream_config;
1467 		break;
1468 	default:
1469 		strlcpy(vdev->name, MP_VDEV_NAME,
1470 			sizeof(vdev->name));
1471 		stream->ops = &rkisp_mp_streams_ops;
1472 		stream->config = &rkisp_mp_stream_config;
1473 	}
1474 
1475 	node = vdev_to_node(vdev);
1476 	rkisp_init_vb2_queue(&node->buf_queue, stream,
1477 			     V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
1478 	ret = rkisp_register_stream_vdev(stream);
1479 	if (ret < 0)
1480 		return ret;
1481 
1482 	stream->streaming = false;
1483 	stream->interlaced = false;
1484 	stream->burst =
1485 		CIF_MI_CTRL_BURST_LEN_LUM_16 |
1486 		CIF_MI_CTRL_BURST_LEN_CHROM_16;
1487 	atomic_set(&stream->sequence, 0);
1488 	return 0;
1489 }
1490 
1491 int rkisp_register_stream_v21(struct rkisp_device *dev)
1492 {
1493 	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
1494 	int ret;
1495 
1496 	ret = rkisp_stream_init(dev, RKISP_STREAM_MP);
1497 	if (ret < 0)
1498 		goto err;
1499 	ret = rkisp_stream_init(dev, RKISP_STREAM_SP);
1500 	if (ret < 0)
1501 		goto err_free_mp;
1502 	ret = rkisp_stream_init(dev, RKISP_STREAM_DMATX0);
1503 	if (ret < 0)
1504 		goto err_free_sp;
1505 	ret = rkisp_stream_init(dev, RKISP_STREAM_DMATX2);
1506 	if (ret < 0)
1507 		goto err_free_tx0;
1508 	ret = rkisp_stream_init(dev, RKISP_STREAM_DMATX3);
1509 	if (ret < 0)
1510 		goto err_free_tx2;
1511 
1512 	return 0;
1513 err_free_tx2:
1514 	rkisp_unregister_stream_vdev(&cap_dev->stream[RKISP_STREAM_DMATX2]);
1515 err_free_tx0:
1516 	rkisp_unregister_stream_vdev(&cap_dev->stream[RKISP_STREAM_DMATX0]);
1517 err_free_sp:
1518 	rkisp_unregister_stream_vdev(&cap_dev->stream[RKISP_STREAM_SP]);
1519 err_free_mp:
1520 	rkisp_unregister_stream_vdev(&cap_dev->stream[RKISP_STREAM_MP]);
1521 err:
1522 	return ret;
1523 }
1524 
1525 void rkisp_unregister_stream_v21(struct rkisp_device *dev)
1526 {
1527 	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
1528 	struct rkisp_stream *stream;
1529 
1530 	stream = &cap_dev->stream[RKISP_STREAM_MP];
1531 	rkisp_unregister_stream_vdev(stream);
1532 	stream = &cap_dev->stream[RKISP_STREAM_SP];
1533 	rkisp_unregister_stream_vdev(stream);
1534 	stream = &cap_dev->stream[RKISP_STREAM_DMATX0];
1535 	rkisp_unregister_stream_vdev(stream);
1536 	stream = &cap_dev->stream[RKISP_STREAM_DMATX2];
1537 	rkisp_unregister_stream_vdev(stream);
1538 	stream = &cap_dev->stream[RKISP_STREAM_DMATX3];
1539 	rkisp_unregister_stream_vdev(stream);
1540 }
1541 
1542 /****************  Interrupt Handlers  ****************/
1543 
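/*
 * MI frame-end interrupt handler: rotates buffers for each stream, completes
 * pending stream-stop requests, and triggers the ISP read-back once the
 * expected raw writers have finished in the HDR read-back modes.
 */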
1544 void rkisp_mi_v21_isr(u32 mis_val, struct rkisp_device *dev)
1545 {
1546 	struct rkisp_stream *stream;
1547 	unsigned int i;
1548 	static u8 end_tx0, end_tx2;
1549 
1550 	v4l2_dbg(3, rkisp_debug, &dev->v4l2_dev,
1551 		 "mi isr:0x%x\n", mis_val);
1552 
1553 	if (mis_val & CIF_MI_DMA_READY)
1554 		rkisp_dmarx_isr(mis_val, dev);
1555 
1556 	for (i = 0; i < RKISP_MAX_STREAM; ++i) {
1557 		stream = &dev->cap_dev.stream[i];
1558 
1559 		if (!(mis_val & CIF_MI_FRAME(stream)))
1560 			continue;
1561 
1562 		if (i == RKISP_STREAM_DMATX0)
1563 			end_tx0 = true;
1564 		if (i == RKISP_STREAM_DMATX2)
1565 			end_tx2 = true;
1566 
1567 		mi_frame_end_int_clear(stream);
1568 
1569 		if (stream->stopping) {
1570 			/*
1571 			 * Make sure the stream is actually stopped (its state
1572 			 * can be read from the shadow register) before the
1573 			 * wake_up(), since the woken thread would immediately
1574 			 * free all the frame buffers. stop_mi() takes effect at
1575 			 * the next frame end, which syncs the configuration to
1576 			 * the shadow regs.
1577 			 */
1578 			if (!dev->hw_dev->is_single &&
1579 			    (stream->id == RKISP_STREAM_MP || stream->id == RKISP_STREAM_SP)) {
1580 				stream->stopping = false;
1581 				stream->streaming = false;
1582 				stream->ops->disable_mi(stream);
1583 				wake_up(&stream->done);
1584 			} else if (stream->ops->is_stream_stopped(dev->base_addr)) {
1585 				stream->stopping = false;
1586 				stream->streaming = false;
1587 				wake_up(&stream->done);
1588 			}
1589 			if (i == RKISP_STREAM_MP) {
1590 				end_tx0 = false;
1591 				end_tx2 = false;
1592 			}
1593 		} else {
1594 			mi_frame_end(stream);
1595 			if (dev->dmarx_dev.trigger == T_AUTO &&
1596 			    ((dev->hdr.op_mode == HDR_RDBK_FRAME1 && end_tx2) ||
1597 			     (dev->hdr.op_mode == HDR_RDBK_FRAME2 && end_tx2 && end_tx0))) {
1598 				end_tx0 = false;
1599 				end_tx2 = false;
1600 				rkisp_trigger_read_back(dev, false, false, false);
1601 			}
1602 		}
1603 	}
1604 
1605 	if (mis_val & CIF_MI_MP_FRAME) {
1606 		stream = &dev->cap_dev.stream[RKISP_STREAM_MP];
1607 		if (!stream->streaming)
1608 			dev->irq_ends_mask &= ~ISP_FRAME_MP;
1609 		else
1610 			dev->irq_ends_mask |= ISP_FRAME_MP;
1611 		rkisp_check_idle(dev, ISP_FRAME_MP);
1612 	}
1613 	if (mis_val & CIF_MI_SP_FRAME) {
1614 		stream = &dev->cap_dev.stream[RKISP_STREAM_SP];
1615 		if (!stream->streaming)
1616 			dev->irq_ends_mask &= ~ISP_FRAME_SP;
1617 		else
1618 			dev->irq_ends_mask |= ISP_FRAME_SP;
1619 		rkisp_check_idle(dev, ISP_FRAME_SP);
1620 	}
1621 }
1622 
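/*
 * MIPI/CSI-2 interrupt handler: reports PHY/packet/overflow/size errors,
 * counts raw writer frames, and masks the CSI error interrupts once the
 * error count exceeds RKISP_CONTI_ERR_MAX.
 */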
1623 void rkisp_mipi_v21_isr(unsigned int phy, unsigned int packet,
1624 			unsigned int overflow, unsigned int state,
1625 			struct rkisp_device *dev)
1626 {
1627 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1628 	struct rkisp_stream *stream;
1629 	u32 packet_err = PACKET_ERR_F_BNDRY_MATCG | PACKET_ERR_F_SEQ |
1630 		PACKET_ERR_FRAME_DATA | PACKET_ERR_ECC_1BIT |
1631 		PACKET_ERR_ECC_2BIT | PACKET_ERR_CHECKSUM;
1632 	u32 state_err = RAW_WR_SIZE_ERR | RAW_RD_SIZE_ERR;
1633 	int i, id;
1634 
1635 	v4l2_dbg(3, rkisp_debug, &dev->v4l2_dev,
1636 		 "csi state:0x%x\n", state);
1637 	dev->csi_dev.irq_cnt++;
1638 	if (phy && (dev->isp_inp & INP_CSI) &&
1639 	    dev->csi_dev.err_cnt++ < RKISP_CONTI_ERR_MAX)
1640 		v4l2_warn(v4l2_dev, "MIPI error: phy: 0x%08x\n", phy);
1641 	if ((packet & packet_err) && (dev->isp_inp & INP_CSI) &&
1642 	    dev->csi_dev.err_cnt < RKISP_CONTI_ERR_MAX) {
1643 		if (packet & 0xfff)
1644 			dev->csi_dev.err_cnt++;
1645 		v4l2_warn(v4l2_dev, "MIPI error: packet: 0x%08x\n", packet);
1646 	}
1647 	if (overflow && dev->csi_dev.err_cnt++ < RKISP_CONTI_ERR_MAX)
1648 		v4l2_warn(v4l2_dev, "MIPI error: overflow: 0x%08x\n", overflow);
1649 	if (state & state_err)
1650 		v4l2_warn(v4l2_dev, "MIPI error: size: 0x%08x\n", state);
1651 	if (state & ISP21_MIPI_DROP_FRM)
1652 		v4l2_warn(v4l2_dev, "MIPI drop frame\n");
1653 
1654 	/* use the first Y_STATE irq as the csi sof event */
1655 	if (state & (RAW0_Y_STATE | RAW1_Y_STATE)) {
1656 		for (i = 0; i < HDR_DMA_MAX - 1; i++) {
1657 			if (!((RAW0_Y_STATE << i) & state) ||
1658 			    dev->csi_dev.tx_first[i])
1659 				continue;
1660 			dev->csi_dev.tx_first[i] = true;
1661 			id = i ? 2 : 0;
1662 			rkisp_csi_sof(dev, id);
1663 			stream = &dev->cap_dev.stream[id + RKISP_STREAM_DMATX0];
1664 			atomic_inc(&stream->sequence);
1665 		}
1666 	}
1667 	if (state & (RAW0_WR_FRAME | RAW1_WR_FRAME)) {
1668 		dev->csi_dev.err_cnt = 0;
1669 		for (i = 0; i < HDR_DMA_MAX - 1; i++) {
1670 			if (!((RAW0_WR_FRAME << i) & state))
1671 				continue;
1672 			if (!dev->csi_dev.tx_first[i]) {
1673 				id = i ? RKISP_STREAM_DMATX2 : RKISP_STREAM_DMATX0;
1674 				stream = &dev->cap_dev.stream[id];
1675 				atomic_inc(&stream->sequence);
1676 			}
1677 			dev->csi_dev.tx_first[i] = false;
1678 		}
1679 	}
1680 	if (state & ISP21_RAW3_WR_FRAME) {
1681 		dev->csi_dev.err_cnt = 0;
1682 		stream = &dev->cap_dev.stream[RKISP_STREAM_DMATX3];
1683 		atomic_inc(&stream->sequence);
1684 	}
1685 
1686 	if (dev->csi_dev.err_cnt > RKISP_CONTI_ERR_MAX) {
1687 		if (!(dev->isp_state & ISP_MIPI_ERROR)) {
1688 			dev->isp_state |= ISP_MIPI_ERROR;
1689 			rkisp_write(dev, CSI2RX_MASK_PHY, 0, true);
1690 			rkisp_write(dev, CSI2RX_MASK_PACKET, 0, true);
1691 			rkisp_write(dev, CSI2RX_MASK_OVERFLOW, 0, true);
1692 			if (dev->hw_dev->monitor.is_en) {
1693 				if (!completion_done(&dev->hw_dev->monitor.cmpl))
1694 					complete(&dev->hw_dev->monitor.cmpl);
1695 				dev->hw_dev->monitor.state |= ISP_MIPI_ERROR;
1696 			}
1697 		}
1698 	}
1699 }
1700