// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Rockchip Electronics Co., Ltd. */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include "dev.h"
#include "regs.h"

#define CIF_ISP_REQ_BUFS_MIN			0

static int mi_frame_end(struct rkisp_stream *stream);
static void rkisp_buf_queue(struct vb2_buffer *vb);
static int rkisp_create_dummy_buf(struct rkisp_stream *stream);

/* configure dual-crop unit */
static int rkisp_stream_config_dcrop(struct rkisp_stream *stream, bool async)
{
	struct rkisp_device *dev = stream->ispdev;
	struct v4l2_rect *dcrop = &stream->dcrop;
	struct v4l2_rect *input_win;

	/* the dual-crop unit gets its data from the ISP */
	input_win = rkisp_get_isp_sd_win(&dev->isp_sdev);

	if (dcrop->width == input_win->width &&
	    dcrop->height == input_win->height &&
	    dcrop->left == 0 && dcrop->top == 0) {
		rkisp_disable_dcrop(stream, async);
		v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
			 "stream %d crop disabled\n", stream->id);
		return 0;
	}

	rkisp_config_dcrop(stream, dcrop, async);

	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "stream %d crop: %dx%d -> %dx%d\n", stream->id,
		 input_win->width, input_win->height,
		 dcrop->width, dcrop->height);

	return 0;
}

/* configure scale unit */
static int rkisp_stream_config_rsz(struct rkisp_stream *stream, bool async)
{
	struct rkisp_device *dev = stream->ispdev;
	struct v4l2_pix_format_mplane output_fmt = stream->out_fmt;
	struct capture_fmt *output_isp_fmt = &stream->out_isp_fmt;
	struct ispsd_out_fmt *input_isp_fmt =
			rkisp_get_ispsd_out_fmt(&dev->isp_sdev);
	struct v4l2_rect in_y, in_c, out_y, out_c;
	u32 xsubs_in = 1, ysubs_in = 1;
	u32 xsubs_out = 1, ysubs_out = 1;

	if (input_isp_fmt->fmt_type == FMT_BAYER)
		goto disable;

	/* set input and output sizes for scale calculation */
	in_y.width = stream->dcrop.width;
	in_y.height = stream->dcrop.height;
	out_y.width = output_fmt.width;
	out_y.height = output_fmt.height;

	/* the Cb/Cr sizes depend on the format's subsampling */
	if (rkisp_mbus_code_xysubs(input_isp_fmt->mbus_code, &xsubs_in, &ysubs_in)) {
		v4l2_err(&dev->v4l2_dev, "No xsubs/ysubs found\n");
		return -EINVAL;
	}
	in_c.width = in_y.width / xsubs_in;
	in_c.height = in_y.height / ysubs_in;

	if (output_isp_fmt->fmt_type == FMT_YUV) {
		rkisp_fcc_xysubs(output_isp_fmt->fourcc, &xsubs_out, &ysubs_out);
		out_c.width = out_y.width / xsubs_out;
		out_c.height = out_y.height / ysubs_out;
	} else {
		out_c.width = out_y.width / xsubs_in;
		out_c.height = out_y.height / ysubs_in;
	}

	if (in_c.width == out_c.width && in_c.height == out_c.height)
		goto disable;

	/* set RSZ input and output */
	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "stream %d rsz/scale: %dx%d -> %dx%d\n",
		 stream->id, stream->dcrop.width, stream->dcrop.height,
		 output_fmt.width, output_fmt.height);
	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "chroma scaling %dx%d -> %dx%d\n",
		 in_c.width, in_c.height, out_c.width, out_c.height);

	/* calculate and set scale */
	rkisp_config_rsz(stream, &in_y, &in_c, &out_y, &out_c, async);

	if (rkisp_debug)
		rkisp_dump_rsz_regs(stream);

	return 0;

disable:
	rkisp_disable_rsz(stream, async);

	return 0;
}

/***************************** stream operations *****************************/

/*
 * Memory base addresses must respect the AXI burst alignment
 * restriction.
 */
static u32 calc_burst_len(struct rkisp_stream *stream)
{
	struct rkisp_device *dev = stream->ispdev;
	u32 y_size = stream->out_fmt.plane_fmt[0].bytesperline *
		stream->out_fmt.height;
	u32 cb_size = stream->out_fmt.plane_fmt[1].sizeimage;
	u32 cr_size = stream->out_fmt.plane_fmt[2].sizeimage;
	u32 cb_offs, cr_offs;
	u32 bus, burst;
	int i;

	/* MI bus width: 64-bit, or 128-bit on ISP_V12/V13 */
	bus = 8;
	if (dev->isp_ver == ISP_V12 ||
	    dev->isp_ver == ISP_V13)
		bus = 16;

	/* y/c base addr: burstN * bus alignment */
	cb_offs = y_size;
	cr_offs = cr_size ? (cb_size + cb_offs) : 0;

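	/* pick the largest burst length that both chroma offsets are aligned to */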
	if (!(cb_offs % (bus * 16)) &&
		!(cr_offs % (bus * 16)))
		burst = CIF_MI_CTRL_BURST_LEN_LUM_16 |
			CIF_MI_CTRL_BURST_LEN_CHROM_16;
	else if (!(cb_offs % (bus * 8)) &&
		!(cr_offs % (bus * 8)))
		burst = CIF_MI_CTRL_BURST_LEN_LUM_8 |
			CIF_MI_CTRL_BURST_LEN_CHROM_8;
	else
		burst = CIF_MI_CTRL_BURST_LEN_LUM_4 |
			CIF_MI_CTRL_BURST_LEN_CHROM_4;

	if (cb_offs % (bus * 4) ||
		cr_offs % (bus * 4))
		v4l2_warn(&dev->v4l2_dev,
			"%dx%d fmt:0x%x not supported, should be %d aligned\n",
			stream->out_fmt.width,
			stream->out_fmt.height,
			stream->out_fmt.pixelformat,
			(cr_offs == 0) ? bus * 4 : bus * 16);

	stream->burst = burst;
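	/* the returned burst length is limited to the smallest one used by any stream */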
	for (i = 0; i < RKISP_MAX_STREAM; i++)
		if (burst > dev->cap_dev.stream[i].burst)
			burst = dev->cap_dev.stream[i].burst;

	if (stream->interlaced) {
		if (!(stream->out_fmt.width % (bus * 16)))
			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_16 |
				CIF_MI_CTRL_BURST_LEN_CHROM_16;
		else if (!(stream->out_fmt.width % (bus * 8)))
			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_8 |
				CIF_MI_CTRL_BURST_LEN_CHROM_8;
		else
			stream->burst = CIF_MI_CTRL_BURST_LEN_LUM_4 |
				CIF_MI_CTRL_BURST_LEN_CHROM_4;
		if (stream->out_fmt.width % (bus * 4))
			v4l2_warn(&dev->v4l2_dev,
				"interlaced: width should be %d aligned\n",
				bus * 4);
		burst = min(stream->burst, burst);
		stream->burst = burst;
	}

	return burst;
}

/*
 * Configure the memory interface for the mainpath.
 * This should only be called at stream-on.
 */
static int mp_config_mi(struct rkisp_stream *stream)
{
	void __iomem *base = stream->ispdev->base_addr;

	/*
	 * NOTE: plane_fmt[0].sizeimage is total size of all planes for single
	 * memory plane formats, so calculate the size explicitly.
	 */
	mi_set_y_size(stream, stream->out_fmt.plane_fmt[0].bytesperline *
			 stream->out_fmt.height);
	mi_set_cb_size(stream, stream->out_fmt.plane_fmt[1].sizeimage);
	mi_set_cr_size(stream, stream->out_fmt.plane_fmt[2].sizeimage);
	mi_frame_end_int_enable(stream);
	if (stream->out_isp_fmt.uv_swap)
		mp_set_uv_swap(base);

	config_mi_ctrl(stream, calc_burst_len(stream));
	mp_mi_ctrl_set_format(base, stream->out_isp_fmt.write_format);
	mp_mi_ctrl_autoupdate_en(base);

	/* set up first buffer */
	mi_frame_end(stream);
	return 0;
}

static int mbus_code_sp_in_fmt(u32 in_mbus_code, u32 out_fourcc, u32 *format)
{
	switch (in_mbus_code) {
	case MEDIA_BUS_FMT_YUYV8_2X8:
		*format = MI_CTRL_SP_INPUT_YUV422;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Only the SP supports a YCbCr 4:0:0 output format, and its input
	 * format must also be set to YCbCr 4:0:0 in that case.
	 * The ISP output format is YCbCr 4:2:2, so the CbCr data is
	 * discarded here.
	 */
	if (out_fourcc == V4L2_PIX_FMT_GREY)
		*format = MI_CTRL_SP_INPUT_YUV400;

	return 0;
}

/*
 * Configure the memory interface for the selfpath.
 * This should only be called at stream-on.
 */
static int sp_config_mi(struct rkisp_stream *stream)
{
	void __iomem *base = stream->ispdev->base_addr;
	struct rkisp_device *dev = stream->ispdev;
	struct capture_fmt *output_isp_fmt = &stream->out_isp_fmt;
	struct ispsd_out_fmt *input_isp_fmt =
			rkisp_get_ispsd_out_fmt(&dev->isp_sdev);
	u32 sp_in_fmt;

	if (mbus_code_sp_in_fmt(input_isp_fmt->mbus_code,
				output_isp_fmt->fourcc, &sp_in_fmt)) {
		v4l2_err(&dev->v4l2_dev, "Can't find the input format\n");
		return -EINVAL;
	}

	/*
	 * NOTE: plane_fmt[0].sizeimage is total size of all planes for single
	 * memory plane formats, so calculate the size explicitly.
	 */
	mi_set_y_size(stream, stream->out_fmt.plane_fmt[0].bytesperline *
		      stream->out_fmt.height);
	mi_set_cb_size(stream, stream->out_fmt.plane_fmt[1].sizeimage);
	mi_set_cr_size(stream, stream->out_fmt.plane_fmt[2].sizeimage);

	sp_set_y_width(base, stream->out_fmt.width);
	if (stream->interlaced) {
		stream->u.sp.vir_offs =
			stream->out_fmt.plane_fmt[0].bytesperline;
		sp_set_y_height(base, stream->out_fmt.height / 2);
		sp_set_y_line_length(base, stream->u.sp.y_stride * 2);
	} else {
		sp_set_y_height(base, stream->out_fmt.height);
		sp_set_y_line_length(base, stream->u.sp.y_stride);
	}

	mi_frame_end_int_enable(stream);
	if (output_isp_fmt->uv_swap)
		sp_set_uv_swap(base);

	config_mi_ctrl(stream, calc_burst_len(stream));
	sp_mi_ctrl_set_format(base, stream->out_isp_fmt.write_format |
			      sp_in_fmt | output_isp_fmt->output_format);

	sp_mi_ctrl_autoupdate_en(base);

	/* set up first buffer */
	mi_frame_end(stream);
	return 0;
}

static void mp_enable_mi(struct rkisp_stream *stream)
{
	void __iomem *base = stream->ispdev->base_addr;
	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;

	mi_ctrl_mp_disable(base);
	if (isp_fmt->fmt_type == FMT_BAYER)
		mi_ctrl_mpraw_enable(base);
	else if (isp_fmt->fmt_type == FMT_YUV)
		mi_ctrl_mpyuv_enable(base);
}

static void sp_enable_mi(struct rkisp_stream *stream)
{
	void __iomem *base = stream->ispdev->base_addr;

	mi_ctrl_spyuv_enable(base);
}

static void mp_disable_mi(struct rkisp_stream *stream)
{
	struct rkisp_device *dev = stream->ispdev;
	void __iomem *base = dev->base_addr;

	mi_ctrl_mp_disable(base);
}

static void sp_disable_mi(struct rkisp_stream *stream)
{
	void __iomem *base = stream->ispdev->base_addr;

	mi_ctrl_spyuv_disable(base);
}

/* Update the buffer info in the memory interface; called from the interrupt handler */
static void update_mi(struct rkisp_stream *stream)
{
	struct rkisp_dummy_buffer *dummy_buf = &stream->ispdev->hw_dev->dummy_buf;
	void __iomem *base = stream->ispdev->base_addr;

	/*
	 * The dummy space allocated by dma_alloc_coherent is used: data can
	 * be thrown into it when no buffer is available.
	 */
	if (stream->next_buf) {
		mi_set_y_addr(stream,
			stream->next_buf->buff_addr[RKISP_PLANE_Y]);
		mi_set_cb_addr(stream,
			stream->next_buf->buff_addr[RKISP_PLANE_CB]);
		mi_set_cr_addr(stream,
			stream->next_buf->buff_addr[RKISP_PLANE_CR]);
	} else if (dummy_buf->mem_priv) {
		mi_set_y_addr(stream, dummy_buf->dma_addr);
		mi_set_cb_addr(stream, dummy_buf->dma_addr);
		mi_set_cr_addr(stream, dummy_buf->dma_addr);
	}

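	/* the base addresses already point at each plane, so no extra offsets are needed */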
	mi_set_y_offset(stream, 0);
	mi_set_cb_offset(stream, 0);
	mi_set_cr_offset(stream, 0);
	v4l2_dbg(2, rkisp_debug, &stream->ispdev->v4l2_dev,
		 "%s stream:%d Y:0x%x CB:0x%x CR:0x%x\n",
		 __func__, stream->id,
		 readl(base + stream->config->mi.y_base_ad_init),
		 readl(base + stream->config->mi.cb_base_ad_init),
		 readl(base + stream->config->mi.cr_base_ad_init));
}

static void mp_stop_mi(struct rkisp_stream *stream)
{
	if (!stream->streaming)
		return;
	mi_frame_end_int_clear(stream);
	stream->ops->disable_mi(stream);
}

static void sp_stop_mi(struct rkisp_stream *stream)
{
	if (!stream->streaming)
		return;
	mi_frame_end_int_clear(stream);
	stream->ops->disable_mi(stream);
}

static struct streams_ops rkisp_mp_streams_ops = {
	.config_mi = mp_config_mi,
	.enable_mi = mp_enable_mi,
	.disable_mi = mp_disable_mi,
	.stop_mi = mp_stop_mi,
	.set_data_path = stream_data_path,
	.is_stream_stopped = mp_is_stream_stopped,
	.update_mi = update_mi,
	.frame_end = mi_frame_end,
};

static struct streams_ops rkisp_sp_streams_ops = {
	.config_mi = sp_config_mi,
	.enable_mi = sp_enable_mi,
	.disable_mi = sp_disable_mi,
	.stop_mi = sp_stop_mi,
	.set_data_path = stream_data_path,
	.is_stream_stopped = sp_is_stream_stopped,
	.update_mi = update_mi,
	.frame_end = mi_frame_end,
};

/*
 * This function is called at frame end. The next frame is already being
 * processed, so the buffer for the next-next frame has to be set up here,
 * otherwise it will overflow.
 */
static int mi_frame_end(struct rkisp_stream *stream)
{
	struct rkisp_device *dev = stream->ispdev;
	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;
	bool interlaced = stream->interlaced;
	unsigned long lock_flags = 0;
	int i = 0;

	if (stream->curr_buf &&
	    (!interlaced ||
	     (stream->u.sp.field_rec == RKISP_FIELD_ODD &&
	      stream->u.sp.field == RKISP_FIELD_EVEN))) {
		u64 ns = ktime_get_ns();

		/* Dequeue a filled buffer */
		for (i = 0; i < isp_fmt->mplanes; i++) {
			u32 payload_size = stream->out_fmt.plane_fmt[i].sizeimage;

			vb2_set_plane_payload(&stream->curr_buf->vb.vb2_buf, i, payload_size);
		}
		stream->curr_buf->vb.sequence = atomic_read(&dev->isp_sdev.frm_sync_seq) - 1;
		stream->curr_buf->vb.vb2_buf.timestamp = ns;
		vb2_buffer_done(&stream->curr_buf->vb.vb2_buf,
				VB2_BUF_STATE_DONE);
		stream->curr_buf = NULL;
	}

	if (!interlaced ||
		(stream->curr_buf == stream->next_buf &&
		stream->u.sp.field == RKISP_FIELD_ODD)) {
		/*
		 * The next frame is written to this buffer.
		 * Interlaced: next buffer address for the odd field.
		 */
		stream->curr_buf = stream->next_buf;
		stream->next_buf = NULL;

		/* Set up an empty buffer for the next-next frame */
		spin_lock_irqsave(&stream->vbq_lock, lock_flags);
		if (!list_empty(&stream->buf_queue)) {
			stream->next_buf =
				list_first_entry(&stream->buf_queue,
						 struct rkisp_buffer,
						 queue);
			list_del(&stream->next_buf->queue);
		}
		spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
	} else if (stream->u.sp.field_rec == RKISP_FIELD_ODD &&
		stream->u.sp.field == RKISP_FIELD_EVEN) {
		/* Interlaced: next buffer address for the even field */
		if (stream->next_buf) {
			stream->next_buf->buff_addr[RKISP_PLANE_Y] +=
				stream->u.sp.vir_offs;
			stream->next_buf->buff_addr[RKISP_PLANE_CB] +=
				stream->u.sp.vir_offs;
			stream->next_buf->buff_addr[RKISP_PLANE_CR] +=
				stream->u.sp.vir_offs;
		}
		stream->curr_buf = stream->next_buf;
	}

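	/* program the newly selected buffer; it takes effect at the next frame end via the shadow registers */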
	stream->ops->update_mi(stream);

	if (interlaced)
		stream->u.sp.field_rec = stream->u.sp.field;

	return 0;
}

/***************************** vb2 operations *****************************/

/*
 * Set the stop flag and wait; the stream should stop at the frame-end
 * interrupt. If it does not, stop it by force.
 */
static void rkisp_stream_stop(struct rkisp_stream *stream)
{
	struct rkisp_device *dev = stream->ispdev;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
	int ret = 0;

	stream->stopping = true;
	stream->ops->stop_mi(stream);
	if ((dev->isp_state & ISP_START) &&
	    dev->isp_inp != INP_DMARX_ISP) {
		ret = wait_event_timeout(stream->done,
					 !stream->streaming,
					 msecs_to_jiffies(1000));
		if (!ret)
			v4l2_warn(v4l2_dev, "%s id:%d timeout\n",
				  __func__, stream->id);
	}

	stream->stopping = false;
	stream->streaming = false;

	if (stream->id == RKISP_STREAM_MP ||
	    stream->id == RKISP_STREAM_SP) {
		rkisp_disable_dcrop(stream, true);
		rkisp_disable_rsz(stream, true);
	}

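	/* restore the default burst length for the next streaming session */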
	stream->burst =
		CIF_MI_CTRL_BURST_LEN_LUM_16 |
		CIF_MI_CTRL_BURST_LEN_CHROM_16;
	stream->interlaced = false;
}

/*
 * Most registers inside the rockchip ISP1 have shadow registers, since
 * they must not be changed while a frame is being processed.
 * Usually, each sub-module updates its shadow register after
 * processing the last pixel of a frame.
 */
static int rkisp_start(struct rkisp_stream *stream)
{
	int ret;

	if (stream->ops->set_data_path)
		stream->ops->set_data_path(stream);
	ret = stream->ops->config_mi(stream);
	if (ret)
		return ret;

	stream->ops->enable_mi(stream);
	stream->streaming = true;

	return 0;
}

static int rkisp_queue_setup(struct vb2_queue *queue,
			      unsigned int *num_buffers,
			      unsigned int *num_planes,
			      unsigned int sizes[],
			      struct device *alloc_ctxs[])
{
	struct rkisp_stream *stream = queue->drv_priv;
	struct rkisp_device *dev = stream->ispdev;
	const struct v4l2_pix_format_mplane *pixm = NULL;
	const struct capture_fmt *isp_fmt = NULL;
	u32 i;

	pixm = &stream->out_fmt;
	isp_fmt = &stream->out_isp_fmt;
	*num_planes = isp_fmt->mplanes;

	for (i = 0; i < isp_fmt->mplanes; i++) {
		const struct v4l2_plane_pix_format *plane_fmt;

		plane_fmt = &pixm->plane_fmt[i];
		/*
		 * Align the height to 16 when allocating memory so that the
		 * Rockchip encoder can use the DMA buffer directly.
		 */
		sizes[i] = (isp_fmt->fmt_type == FMT_YUV) ?
			plane_fmt->sizeimage / pixm->height *
			ALIGN(pixm->height, 16) :
			plane_fmt->sizeimage;
	}

	rkisp_chk_tb_over(dev);
	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev, "%s count %d, size %d\n",
		 v4l2_type_names[queue->type], *num_buffers, sizes[0]);

	return rkisp_create_dummy_buf(stream);
}

/*
 * The vb2_buffer is wrapped in rkisp_buffer in order to handle
 * mplane and non-mplane buffers uniformly.
 */
static void rkisp_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkisp_buffer *ispbuf = to_rkisp_buffer(vbuf);
	struct vb2_queue *queue = vb->vb2_queue;
	struct rkisp_stream *stream = queue->drv_priv;
	unsigned long lock_flags = 0;
	struct v4l2_pix_format_mplane *pixm = &stream->out_fmt;
	struct capture_fmt *isp_fmt = &stream->out_isp_fmt;
	struct sg_table *sgt;
	int i;

	memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
	for (i = 0; i < isp_fmt->mplanes; i++) {
		vb2_plane_vaddr(vb, i);
		if (stream->ispdev->hw_dev->is_dma_sg_ops) {
			sgt = vb2_dma_sg_plane_desc(vb, i);
			ispbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
		} else {
			ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
		}
	}
	/*
	 * NOTE: plane_fmt[0].sizeimage is total size of all planes for single
	 * memory plane formats, so calculate the size explicitly.
	 */
	if (isp_fmt->mplanes == 1) {
		for (i = 0; i < isp_fmt->cplanes - 1; i++) {
			ispbuf->buff_addr[i + 1] = (i == 0) ?
				ispbuf->buff_addr[i] +
				pixm->plane_fmt[i].bytesperline *
				pixm->height :
				ispbuf->buff_addr[i] +
				pixm->plane_fmt[i].sizeimage;
		}
	}

	v4l2_dbg(2, rkisp_debug, &stream->ispdev->v4l2_dev,
		 "stream:%d queue buf:0x%x\n",
		 stream->id, ispbuf->buff_addr[0]);

	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
	list_add_tail(&ispbuf->queue, &stream->buf_queue);
	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
}

static int rkisp_create_dummy_buf(struct rkisp_stream *stream)
{
	return rkisp_alloc_common_dummy_buf(stream->ispdev);
}

static void rkisp_destroy_dummy_buf(struct rkisp_stream *stream)
{
	struct rkisp_device *dev = stream->ispdev;

	rkisp_free_common_dummy_buf(dev);
}

static void destroy_buf_queue(struct rkisp_stream *stream,
			      enum vb2_buffer_state state)
{
	unsigned long lock_flags = 0;
	struct rkisp_buffer *buf;

	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
	if (stream->curr_buf) {
		list_add_tail(&stream->curr_buf->queue, &stream->buf_queue);
		if (stream->curr_buf == stream->next_buf)
			stream->next_buf = NULL;
		stream->curr_buf = NULL;
	}
	if (stream->next_buf) {
		list_add_tail(&stream->next_buf->queue, &stream->buf_queue);
		stream->next_buf = NULL;
	}
	while (!list_empty(&stream->buf_queue)) {
		buf = list_first_entry(&stream->buf_queue,
			struct rkisp_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
}

static void rkisp_stop_streaming(struct vb2_queue *queue)
{
	struct rkisp_stream *stream = queue->drv_priv;
	struct rkisp_vdev_node *node = &stream->vnode;
	struct rkisp_device *dev = stream->ispdev;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
	int ret;

	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "%s %d\n", __func__, stream->id);

	if (!stream->streaming)
		return;

	rkisp_stream_stop(stream);
	media_pipeline_stop(&node->vdev.entity);
	ret = dev->pipe.set_stream(&dev->pipe, false);
	if (ret < 0)
		v4l2_err(v4l2_dev,
			 "pipeline stream-off failed:%d\n", ret);

	/* release buffers */
	destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);

	ret = dev->pipe.close(&dev->pipe);
	if (ret < 0)
		v4l2_err(v4l2_dev, "pipeline close failed error:%d\n", ret);
	rkisp_destroy_dummy_buf(stream);
	atomic_dec(&dev->cap_dev.refcnt);
}

static int rkisp_stream_start(struct rkisp_stream *stream)
{
	struct v4l2_device *v4l2_dev = &stream->ispdev->v4l2_dev;
	struct rkisp_device *dev = stream->ispdev;
	struct rkisp_stream *other = &dev->cap_dev.stream[stream->id ^ 1];
	bool async = false;
	int ret;

	if (other->streaming)
		async = true;

	ret = rkisp_stream_config_rsz(stream, async);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "config rsz failed with error %d\n", ret);
		return ret;
	}

	/*
	 * Can't be async here, otherwise the stream that is started later
	 * fails to produce the MI interrupt.
	 */
	ret = rkisp_stream_config_dcrop(stream, false);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "config dcrop failed with error %d\n", ret);
		return ret;
	}

	return rkisp_start(stream);
}

static int
rkisp_start_streaming(struct vb2_queue *queue, unsigned int count)
{
	struct rkisp_stream *stream = queue->drv_priv;
	struct rkisp_vdev_node *node = &stream->vnode;
	struct rkisp_device *dev = stream->ispdev;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
	int ret = -1;

	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "%s %d\n", __func__, stream->id);

	if (WARN_ON(stream->streaming))
		return -EBUSY;

	atomic_inc(&dev->cap_dev.refcnt);
	if (!dev->isp_inp || !stream->linked) {
		v4l2_err(v4l2_dev, "check video link or isp input\n");
		goto buffer_done;
	}

	if (atomic_read(&dev->cap_dev.refcnt) == 1 &&
	    (dev->isp_inp & INP_CSI || dev->isp_inp & INP_DVP)) {
		/* update sensor info when first streaming */
		ret = rkisp_update_sensor_info(dev);
		if (ret < 0) {
			v4l2_err(v4l2_dev,
				 "update sensor info failed %d\n",
				 ret);
			goto buffer_done;
		}
	}

	if (dev->active_sensor &&
		dev->active_sensor->fmt[0].format.field ==
		V4L2_FIELD_INTERLACED) {
		if (stream->id != RKISP_STREAM_SP) {
			v4l2_err(v4l2_dev,
				"only selfpath supports interlaced\n");
			ret = -EINVAL;
			goto buffer_done;
		}
		stream->interlaced = true;
		stream->u.sp.field = RKISP_FIELD_INVAL;
		stream->u.sp.field_rec = RKISP_FIELD_INVAL;
	}

	/* enable clocks/power-domains */
	ret = dev->pipe.open(&dev->pipe, &node->vdev.entity, true);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "open cif pipeline failed %d\n", ret);
		goto destroy_dummy_buf;
	}

	/* configure stream hardware to start */
	ret = rkisp_stream_start(stream);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "start streaming failed\n");
		goto close_pipe;
	}

	/* start sub-devices */
	ret = dev->pipe.set_stream(&dev->pipe, true);
	if (ret < 0)
		goto stop_stream;

	ret = media_pipeline_start(&node->vdev.entity, &dev->pipe.pipe);
	if (ret < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "start pipeline failed %d\n", ret);
		goto pipe_stream_off;
	}

	return 0;

pipe_stream_off:
	dev->pipe.set_stream(&dev->pipe, false);
stop_stream:
	rkisp_stream_stop(stream);
close_pipe:
	dev->pipe.close(&dev->pipe);
destroy_dummy_buf:
	rkisp_destroy_dummy_buf(stream);
buffer_done:
	destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
	atomic_dec(&dev->cap_dev.refcnt);
	stream->streaming = false;
	return ret;
}

static struct vb2_ops rkisp_vb2_ops = {
	.queue_setup = rkisp_queue_setup,
	.buf_queue = rkisp_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = rkisp_stop_streaming,
	.start_streaming = rkisp_start_streaming,
};

static int rkisp_init_vb2_queue(struct vb2_queue *q,
				struct rkisp_stream *stream,
				enum v4l2_buf_type buf_type)
{
	q->type = buf_type;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = stream;
	q->ops = &rkisp_vb2_ops;
	q->mem_ops = stream->ispdev->hw_dev->mem_ops;
	q->buf_struct_size = sizeof(struct rkisp_buffer);
	q->min_buffers_needed = CIF_ISP_REQ_BUFS_MIN;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &stream->ispdev->apilock;
	q->dev = stream->ispdev->hw_dev->dev;
	q->allow_cache_hints = 1;
	if (stream->ispdev->hw_dev->is_dma_contig)
		q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
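	/* allocate buffers from 32-bit addressable memory */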
	q->gfp_flags = GFP_DMA32;
	return vb2_queue_init(q);
}

static int rkisp_stream_init(struct rkisp_device *dev, u32 id)
{
	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
	struct rkisp_stream *stream;
	struct video_device *vdev;
	struct rkisp_vdev_node *node;
	int ret = 0;

	stream = &cap_dev->stream[id];
	stream->id = id;
	stream->ispdev = dev;
	vdev = &stream->vnode.vdev;

	INIT_LIST_HEAD(&stream->buf_queue);
	init_waitqueue_head(&stream->done);
	spin_lock_init(&stream->vbq_lock);

	stream->linked = true;
	switch (id) {
	case RKISP_STREAM_SP:
		strlcpy(vdev->name, SP_VDEV_NAME,
			sizeof(vdev->name));
		stream->ops = &rkisp_sp_streams_ops;
		stream->config = &rkisp_sp_stream_config;
		break;
	default:
		strlcpy(vdev->name, MP_VDEV_NAME,
			sizeof(vdev->name));
		stream->ops = &rkisp_mp_streams_ops;
		stream->config = &rkisp_mp_stream_config;
	}

	node = vdev_to_node(vdev);
	rkisp_init_vb2_queue(&node->buf_queue, stream,
			     V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	ret = rkisp_register_stream_vdev(stream);
	if (ret < 0)
		return ret;

	stream->streaming = false;
	stream->interlaced = false;
	stream->burst =
		CIF_MI_CTRL_BURST_LEN_LUM_16 |
		CIF_MI_CTRL_BURST_LEN_CHROM_16;
	atomic_set(&stream->sequence, 0);
	return 0;
}

int rkisp_register_stream_v1x(struct rkisp_device *dev)
{
	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
	int ret;

	ret = rkisp_stream_init(dev, RKISP_STREAM_MP);
	if (ret < 0)
		goto err;
	if (dev->isp_ver != ISP_V10_1) {
		ret = rkisp_stream_init(dev, RKISP_STREAM_SP);
		if (ret < 0)
			goto err_free_mp;
	}

	return 0;
err_free_mp:
	rkisp_unregister_stream_vdev(&cap_dev->stream[RKISP_STREAM_MP]);
err:
	return ret;
}

void rkisp_unregister_stream_v1x(struct rkisp_device *dev)
{
	struct rkisp_capture_device *cap_dev = &dev->cap_dev;
	struct rkisp_stream *stream;

	stream = &cap_dev->stream[RKISP_STREAM_MP];
	rkisp_unregister_stream_vdev(stream);
	if (dev->isp_ver != ISP_V10_1) {
		stream = &cap_dev->stream[RKISP_STREAM_SP];
		rkisp_unregister_stream_vdev(stream);
	}
}

void rkisp_mi_v1x_isr(u32 mis_val, struct rkisp_device *dev)
{
	unsigned int i;

	v4l2_dbg(3, rkisp_debug, &dev->v4l2_dev,
		 "mi isr:0x%x\n", mis_val);

	if (mis_val & CIF_MI_DMA_READY)
		rkisp_dmarx_isr(mis_val, dev);

	for (i = 0; i < RKISP_MAX_STREAM; ++i) {
		struct rkisp_stream *stream = &dev->cap_dev.stream[i];

		if (!(mis_val & CIF_MI_FRAME(stream)))
			continue;

		mi_frame_end_int_clear(stream);

		if (stream->stopping) {
			/*
			 * Make sure the stream has actually stopped (its state
			 * can be read from the shadow register) before waking
			 * up the thread, which would immediately free all
			 * frame buffers. stop_mi() takes effect at the next
			 * frame end, when the configuration is synced to the
			 * shadow registers.
			 */
			if (stream->ops->is_stream_stopped(dev->base_addr)) {
				stream->stopping = false;
				stream->streaming = false;
				wake_up(&stream->done);
			}
		} else {
			mi_frame_end(stream);
		}
	}
}