// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Rockchip Electronics Co., Ltd. */

#include <linux/kfifo.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <media/v4l2-event.h>
#include "dev.h"
#include "regs.h"
#include "mipi-csi2.h"
#include <media/v4l2-fwnode.h>
#include <linux/pm_runtime.h>

#define MEMORY_ALIGN_ROUND_UP_HEIGHT		16

#define SCALE_MIN_WIDTH		4
#define SCALE_MIN_HEIGHT	4
#define SCALE_OUTPUT_STEP_WISE	1
#define CIF_SCALE_REQ_BUFS_MIN	3

static const struct cif_output_fmt scale_out_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_SRGGB16,
		.cplanes = 1,
		.mplanes = 1,
		.bpp = { 16 },
		.raw_bpp = 16,
		.fmt_type = CIF_FMT_TYPE_RAW,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG16,
		.cplanes = 1,
		.mplanes = 1,
		.bpp = { 16 },
		.raw_bpp = 16,
		.fmt_type = CIF_FMT_TYPE_RAW,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG16,
		.cplanes = 1,
		.mplanes = 1,
		.bpp = { 16 },
		.raw_bpp = 16,
		.fmt_type = CIF_FMT_TYPE_RAW,
	}, {
		.fourcc = V4L2_PIX_FMT_SBGGR16,
		.cplanes = 1,
		.mplanes = 1,
		.bpp = { 16 },
		.raw_bpp = 16,
		.fmt_type = CIF_FMT_TYPE_RAW,
	}
};

static int rkcif_scale_enum_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_fmtdesc *f)
{
	const struct cif_output_fmt *fmt = NULL;

	if (f->index >= ARRAY_SIZE(scale_out_fmts))
		return -EINVAL;
	fmt = &scale_out_fmts[f->index];
	f->pixelformat = fmt->fourcc;
	return 0;
}

static int rkcif_scale_g_fmt_vid_cap_mplane(struct file *file, void *priv,
					    struct v4l2_format *f)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);

	f->fmt.pix_mp = scale_vdev->pixm;
	return 0;
}

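/*
 * Align the bits-per-pixel of a plane to the RAW storage bit width of the
 * RV1126 CIF (CIF_RAW_STORED_BIT_WIDTH_RV1126), rounding up to the next
 * multiple so that line stride calculations match what the hardware writes.
 */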
static u32 rkcif_scale_align_bits_per_pixel(struct rkcif_device *cif_dev,
					    const struct cif_output_fmt *fmt,
					    int plane_index)
{
	u32 bpp = 0, i;

	if (fmt) {
		switch (fmt->fourcc) {
		case V4L2_PIX_FMT_SBGGR16:
		case V4L2_PIX_FMT_SGBRG16:
		case V4L2_PIX_FMT_SGRBG16:
		case V4L2_PIX_FMT_SRGGB16:
			bpp = max(fmt->bpp[plane_index], (u8)CIF_RAW_STORED_BIT_WIDTH_RV1126);
			for (i = 1; i < 5; i++) {
				if (i * CIF_RAW_STORED_BIT_WIDTH_RV1126 >= bpp) {
					bpp = i * CIF_RAW_STORED_BIT_WIDTH_RV1126;
					break;
				}
			}
			break;
		default:
			v4l2_err(&cif_dev->v4l2_dev, "fourcc: %d is not supported!\n",
				 fmt->fourcc);
			break;
		}
	}

	return bpp;
}


static const struct
cif_output_fmt *find_output_fmt(u32 pixelfmt)
{
	const struct cif_output_fmt *fmt;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(scale_out_fmts); i++) {
		fmt = &scale_out_fmts[i];
		if (fmt->fourcc == pixelfmt)
			return fmt;
	}

	return NULL;
}

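/*
 * Negotiate the scale output format: query the terminal sensor for its
 * active resolution, pick a downscale mode (8/16/32 times) from the ratio
 * between the source width and the requested width, and fill in the
 * bytesperline/sizeimage of the single RAW plane. When @try is false the
 * result is stored in the scale vdev.
 */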
static int rkcif_scale_set_fmt(struct rkcif_scale_vdev *scale_vdev,
			       struct v4l2_pix_format_mplane *pixm,
			       bool try)
{
	struct rkcif_stream *stream = scale_vdev->stream;
	struct rkcif_device *cif_dev = scale_vdev->cifdev;
	struct v4l2_subdev_selection input_sel;
	struct v4l2_subdev_format fmt_src;
	const struct cif_output_fmt *fmt;
	unsigned int imagesize = 0;
	int bpl, size, bpp;
	int scale_times = 0;
	u32 scale_ratio = 0;
	u32 width = 640;
	u32 height = 480;
	int ret = 0;

	if (!cif_dev->terminal_sensor.sd)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->terminal_sensor.sd) {
		fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt_src.pad = 0;
		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd, pad, get_fmt, NULL, &fmt_src);
		if (ret) {
			v4l2_err(&scale_vdev->cifdev->v4l2_dev,
				 "%s: get sensor format failed\n", __func__);
			return ret;
		}

		input_sel.target = V4L2_SEL_TGT_CROP_BOUNDS;
		input_sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		input_sel.pad = 0;
		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd,
				       pad, get_selection, NULL,
				       &input_sel);
		if (!ret) {
			fmt_src.format.width = input_sel.r.width;
			fmt_src.format.height = input_sel.r.height;
		}
		scale_vdev->src_res.width = fmt_src.format.width;
		scale_vdev->src_res.height = fmt_src.format.height;
	}
	fmt = find_output_fmt(pixm->pixelformat);
	if (fmt == NULL) {
		v4l2_err(&scale_vdev->cifdev->v4l2_dev,
			"format of source channel is not bayer raw, scaling is not supported\n");
		return -1;
	}
	if (scale_vdev->src_res.width && scale_vdev->src_res.height) {
		width = scale_vdev->src_res.width;
		height = scale_vdev->src_res.height;
	}
	scale_ratio = width / pixm->width;
	if (scale_ratio <= 8) {
		scale_vdev->scale_mode = SCALE_8TIMES;
		scale_times = 8;
	} else if (scale_ratio <= 16) {
		scale_vdev->scale_mode = SCALE_16TIMES;
		scale_times = 16;
	} else {
		scale_vdev->scale_mode = SCALE_32TIMES;
		scale_times = 32;
	}
	//source resolution align (scale_times * 2)
	pixm->width = width  / (scale_times * 2) * 2;
	pixm->height = height / (scale_times * 2) * 2;
	pixm->num_planes = fmt->mplanes;
	pixm->field = V4L2_FIELD_NONE;
	pixm->quantization = V4L2_QUANTIZATION_DEFAULT;

	bpp = rkcif_scale_align_bits_per_pixel(cif_dev, fmt, 0);
	bpl = pixm->width * bpp / CIF_RAW_STORED_BIT_WIDTH_RV1126;
	size = bpl * pixm->height;
	imagesize += size;

	v4l2_dbg(3, rkcif_debug, &stream->cifdev->v4l2_dev,
		 "%s C-Plane %i size: %d, Total imagesize: %d\n",
		 __func__, 0, size, imagesize);

	if (fmt->mplanes == 1) {
		pixm->plane_fmt[0].bytesperline = bpl;
		pixm->plane_fmt[0].sizeimage = imagesize;
	}

	if (!try) {
		scale_vdev->scale_out_fmt = fmt;
		scale_vdev->pixm = *pixm;

		v4l2_dbg(3, rkcif_debug, &stream->cifdev->v4l2_dev,
			 "%s: req(%d, %d) src out(%d, %d)\n", __func__,
			 pixm->width, pixm->height,
			 scale_vdev->src_res.width, scale_vdev->src_res.height);
	}
	return 0;
}

static int rkcif_scale_s_fmt_vid_cap_mplane(struct file *file,
					    void *priv, struct v4l2_format *f)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	int ret = 0;

	if (vb2_is_busy(&scale_vdev->vnode.buf_queue)) {
		v4l2_err(&scale_vdev->cifdev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	ret = rkcif_scale_set_fmt(scale_vdev, &f->fmt.pix_mp, false);

	return ret;
}

static int rkcif_scale_querycap(struct file *file,
				void *priv, struct v4l2_capability *cap)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	struct device *dev = scale_vdev->cifdev->dev;

	strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
	strscpy(cap->card, dev->driver->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", dev_name(dev));
	return 0;
}

static long rkcif_scale_ioctl_default(struct file *file, void *fh,
				    bool valid_prio, unsigned int cmd, void *arg)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct bayer_blc *pblc;

	switch (cmd) {
	case RKCIF_CMD_GET_SCALE_BLC:
		pblc = (struct bayer_blc *)arg;
		*pblc = scale_vdev->blc;
		v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev, "get scale blc %d %d %d %d\n",
			 pblc->pattern00, pblc->pattern01, pblc->pattern02, pblc->pattern03);
		break;
	case RKCIF_CMD_SET_SCALE_BLC:
		pblc = (struct bayer_blc *)arg;
		scale_vdev->blc = *pblc;
		v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev, "set scale blc %d %d %d %d\n",
			 pblc->pattern00, pblc->pattern01, pblc->pattern02, pblc->pattern03);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rkcif_scale_enum_input(struct file *file, void *priv,
				  struct v4l2_input *input)
{

	if (input->index > 0)
		return -EINVAL;

	input->type = V4L2_INPUT_TYPE_CAMERA;
	strscpy(input->name, "Camera", sizeof(input->name));

	return 0;
}

static int rkcif_scale_try_fmt_vid_cap_mplane(struct file *file, void *fh,
					      struct v4l2_format *f)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	int ret = 0;

	ret = rkcif_scale_set_fmt(scale_vdev, &f->fmt.pix_mp, true);

	return ret;
}

static int rkcif_scale_enum_frameintervals(struct file *file, void *fh,
					   struct v4l2_frmivalenum *fival)
{
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct rkcif_sensor_info *sensor = &dev->terminal_sensor;
	struct v4l2_subdev_frame_interval fi;
	int ret;

	if (fival->index != 0)
		return -EINVAL;

	if (!sensor || !sensor->sd) {
		/* TODO: active_sensor is NULL if using DMARX path */
		v4l2_err(&dev->v4l2_dev, "%s Not active sensor\n", __func__);
		return -ENODEV;
	}

	ret = v4l2_subdev_call(sensor->sd, video, g_frame_interval, &fi);
	if (ret && ret != -ENOIOCTLCMD) {
		return ret;
	} else if (ret == -ENOIOCTLCMD) {
		/* Set a default value for sensors that do not implement this ioctl */
		fi.interval.numerator = 1;
		fi.interval.denominator = 30;
	}

	fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
	fival->stepwise.step.numerator = 1;
	fival->stepwise.step.denominator = 1;
	fival->stepwise.max.numerator = 1;
	fival->stepwise.max.denominator = 1;
	fival->stepwise.min.numerator = fi.interval.numerator;
	fival->stepwise.min.denominator = fi.interval.denominator;

	return 0;
}

static int rkcif_scale_enum_framesizes(struct file *file, void *prov,
				       struct v4l2_frmsizeenum *fsize)
{
	struct v4l2_frmsize_discrete *s = &fsize->discrete;
	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct v4l2_rect input_rect;
	struct rkcif_sensor_info *terminal_sensor = &dev->terminal_sensor;
	struct csi_channel_info csi_info;
	int scale_times = 0;

	if (fsize->index >= RKCIF_SCALE_ENUM_SIZE_MAX)
		return -EINVAL;

	if (!find_output_fmt(fsize->pixel_format))
		return -EINVAL;

	input_rect.width = RKCIF_DEFAULT_WIDTH;
	input_rect.height = RKCIF_DEFAULT_HEIGHT;

	if (terminal_sensor && terminal_sensor->sd)
		get_input_fmt(terminal_sensor->sd,
			      &input_rect, 0, &csi_info);

	switch (fsize->index) {
	case SCALE_8TIMES:
		scale_times = 8;
		break;
	case SCALE_16TIMES:
		scale_times = 16;
		break;
	case SCALE_32TIMES:
		scale_times = 32;
		break;
	default:
		scale_times = 32;
		break;
	}
	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	s->width = input_rect.width  / (scale_times * 2) * 2;
	s->height = input_rect.height / (scale_times * 2) * 2;

	return 0;
}

/* ISP video device IOCTLs */
static const struct v4l2_ioctl_ops rkcif_scale_ioctl = {
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_input = rkcif_scale_enum_input,
	.vidioc_enum_fmt_vid_cap = rkcif_scale_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap_mplane = rkcif_scale_g_fmt_vid_cap_mplane,
	.vidioc_s_fmt_vid_cap_mplane = rkcif_scale_s_fmt_vid_cap_mplane,
	.vidioc_try_fmt_vid_cap_mplane = rkcif_scale_try_fmt_vid_cap_mplane,
	.vidioc_querycap = rkcif_scale_querycap,
	.vidioc_enum_frameintervals = rkcif_scale_enum_frameintervals,
	.vidioc_enum_framesizes = rkcif_scale_enum_framesizes,
	.vidioc_default = rkcif_scale_ioctl_default,
};

static int rkcif_scale_fh_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct rkcif_vdev_node *vnode = vdev_to_node(vdev);
	struct rkcif_scale_vdev *scale_vdev = to_rkcif_scale_vdev(vnode);
	struct rkcif_device *cifdev = scale_vdev->cifdev;
	int ret;

	ret = rkcif_update_sensor_info(scale_vdev->stream);
	if (ret < 0) {
		v4l2_err(vdev,
			 "update sensor info failed %d\n",
			 ret);

		return ret;
	}

	ret = pm_runtime_resume_and_get(cifdev->dev);
	if (ret < 0)
		v4l2_err(&cifdev->v4l2_dev, "Failed to get runtime pm, %d\n",
			 ret);

	mutex_lock(&cifdev->stream_lock);
	if (!atomic_read(&cifdev->fh_cnt))
		rkcif_soft_reset(cifdev, true);
	atomic_inc(&cifdev->fh_cnt);
	mutex_unlock(&cifdev->stream_lock);

	ret = v4l2_fh_open(file);
	if (!ret) {
		ret = v4l2_pipeline_pm_get(&vnode->vdev.entity);
		if (ret < 0)
			vb2_fop_release(file);
	}

	return ret;
}

static int rkcif_scale_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct rkcif_vdev_node *vnode = vdev_to_node(vdev);
	struct rkcif_scale_vdev *scale_vdev = to_rkcif_scale_vdev(vnode);
	struct rkcif_device *cifdev = scale_vdev->cifdev;
	int ret;

	ret = vb2_fop_release(file);
	if (!ret)
		v4l2_pipeline_pm_put(&vnode->vdev.entity);

	pm_runtime_put_sync(cifdev->dev);
	return ret;
}

struct v4l2_file_operations rkcif_scale_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = rkcif_scale_fh_open,
	.release = rkcif_scale_fop_release
};

static int rkcif_scale_vb2_queue_setup(struct vb2_queue *queue,
				       unsigned int *num_buffers,
				       unsigned int *num_planes,
				       unsigned int sizes[],
				       struct device *alloc_ctxs[])
{
	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
	struct rkcif_device *cif_dev = scale_vdev->cifdev;
	const struct v4l2_pix_format_mplane *pixm = NULL;
	const struct cif_output_fmt *cif_fmt;
	u32 i;
	const struct v4l2_plane_pix_format *plane_fmt;

	pixm = &scale_vdev->pixm;
	cif_fmt = scale_vdev->scale_out_fmt;
	*num_planes = cif_fmt->mplanes;

	for (i = 0; i < cif_fmt->mplanes; i++) {
		plane_fmt = &pixm->plane_fmt[i];
		sizes[i] = plane_fmt->sizeimage;
	}

	v4l2_dbg(1, rkcif_debug, &cif_dev->v4l2_dev, "%s count %d, size %d\n",
		 v4l2_type_names[queue->type], *num_buffers, sizes[0]);
	return 0;

}

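/*
 * Queue a buffer: record the DMA address of each memory plane (from the
 * scatter-gather table when the IOMMU is enabled, otherwise from the
 * dma-contig allocator), derive the remaining chunk-plane addresses for
 * single-plane formats, and add the buffer to the pending list.
 */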
static void rkcif_scale_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkcif_buffer *cifbuf = to_rkcif_buffer(vbuf);
	struct vb2_queue *queue = vb->vb2_queue;
	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
	struct v4l2_pix_format_mplane *pixm = &scale_vdev->pixm;
	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;
	struct rkcif_hw *hw_dev = scale_vdev->cifdev->hw_dev;
	unsigned long lock_flags = 0;
	int i;

	memset(cifbuf->buff_addr, 0, sizeof(cifbuf->buff_addr));
	/* If mplanes > 1, every c-plane has its own m-plane,
	 * otherwise, multiple c-planes are in the same m-plane
	 */
	for (i = 0; i < fmt->mplanes; i++) {
		void *addr = vb2_plane_vaddr(vb, i);

		if (hw_dev->iommu_en) {
			struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, i);

			cifbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
		} else {
			cifbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
		}
		if (rkcif_debug && addr && !hw_dev->iommu_en) {
			memset(addr, 0, pixm->plane_fmt[i].sizeimage);
			v4l2_dbg(1, rkcif_debug, &scale_vdev->cifdev->v4l2_dev,
				 "Clear buffer, size: 0x%08x\n",
				 pixm->plane_fmt[i].sizeimage);
		}
	}

	if (fmt->mplanes == 1) {
		for (i = 0; i < fmt->cplanes - 1; i++)
			cifbuf->buff_addr[i + 1] = cifbuf->buff_addr[i] +
				pixm->plane_fmt[i].bytesperline * pixm->height;
	}
	spin_lock_irqsave(&scale_vdev->vbq_lock, lock_flags);
	list_add_tail(&cifbuf->queue, &scale_vdev->buf_head);
	spin_unlock_irqrestore(&scale_vdev->vbq_lock, lock_flags);
}

static int rkcif_scale_stop(struct rkcif_scale_vdev *scale_vdev)
{
	struct rkcif_device *dev = scale_vdev->cifdev;
	int ch = scale_vdev->ch;

	rkcif_write_register_and(dev, CIF_REG_SCL_CH_CTRL,
				 ~(CIF_SCALE_EN(ch) |
				 CIF_SCALE_SW_SRC_CH(0x1f, ch) |
				 CIF_SCALE_SW_MODE(0x03, ch)));
	scale_vdev->state = RKCIF_STATE_READY;
	scale_vdev->frame_idx = 0;
	return 0;
}

static void rkcif_scale_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct rkcif_scale_vdev *scale_vdev = vq->drv_priv;
	struct rkcif_stream *stream = scale_vdev->stream;
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct rkcif_buffer *buf = NULL;
	int ret = 0;

	mutex_lock(&dev->scale_lock);
	/* Make sure no new work queued in isr before draining wq */
	scale_vdev->stopping = true;
	ret = wait_event_timeout(scale_vdev->wq_stopped,
				 scale_vdev->state != RKCIF_STATE_STREAMING,
				 msecs_to_jiffies(1000));
	if (!ret) {
		rkcif_scale_stop(scale_vdev);
		scale_vdev->stopping = false;
	}
	/* release buffers */
	if (scale_vdev->curr_buf)
		list_add_tail(&scale_vdev->curr_buf->queue, &scale_vdev->buf_head);

	if (scale_vdev->next_buf &&
	    scale_vdev->next_buf != scale_vdev->curr_buf)
		list_add_tail(&scale_vdev->next_buf->queue, &scale_vdev->buf_head);
	scale_vdev->curr_buf = NULL;
	scale_vdev->next_buf = NULL;
	while (!list_empty(&scale_vdev->buf_head)) {
		buf = list_first_entry(&scale_vdev->buf_head,
				       struct rkcif_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	mutex_unlock(&dev->scale_lock);
	rkcif_do_stop_stream(stream, RKCIF_STREAM_MODE_TOSCALE);
}

static int rkcif_scale_channel_init(struct rkcif_scale_vdev *scale_vdev)
{
	struct rkcif_device *cif_dev = scale_vdev->cifdev;
	struct rkcif_scale_ch_info *ch_info = &scale_vdev->ch_info;
	struct v4l2_pix_format_mplane pixm = scale_vdev->pixm;
	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;

	if (cif_dev->inf_id == RKCIF_DVP)
		scale_vdev->ch_src = SCALE_DVP;
	else
		scale_vdev->ch_src = 4 * cif_dev->csi_host_idx + scale_vdev->ch;
	ch_info->width = pixm.width;
	ch_info->height = pixm.height;
	ch_info->vir_width = ALIGN(ch_info->width  * fmt->bpp[0] / 8, 8);
	return 0;
}

static enum cif_reg_index get_reg_index_of_scale_vlw(int ch)
{
	enum cif_reg_index index;

	switch (ch) {
	case 0:
		index = CIF_REG_SCL_VLW_CH0;
		break;
	case 1:
		index = CIF_REG_SCL_VLW_CH1;
		break;
	case 2:
		index = CIF_REG_SCL_VLW_CH2;
		break;
	case 3:
		index = CIF_REG_SCL_VLW_CH3;
		break;
	default:
		index = CIF_REG_SCL_VLW_CH0;
		break;
	}

	return index;
}

static enum cif_reg_index get_reg_index_of_scale_frm0_addr(int channel_id)
{
	enum cif_reg_index index;

	switch (channel_id) {
	case 0:
		index = CIF_REG_SCL_FRM0_ADDR_CH0;
		break;
	case 1:
		index = CIF_REG_SCL_FRM0_ADDR_CH1;
		break;
	case 2:
		index = CIF_REG_SCL_FRM0_ADDR_CH2;
		break;
	case 3:
		index = CIF_REG_SCL_FRM0_ADDR_CH3;
		break;
	default:
		index = CIF_REG_SCL_FRM0_ADDR_CH0;
		break;
	}

	return index;
}

static enum cif_reg_index get_reg_index_of_scale_frm1_addr(int channel_id)
{
	enum cif_reg_index index;

	switch (channel_id) {
	case 0:
		index = CIF_REG_SCL_FRM1_ADDR_CH0;
		break;
	case 1:
		index = CIF_REG_SCL_FRM1_ADDR_CH1;
		break;
	case 2:
		index = CIF_REG_SCL_FRM1_ADDR_CH2;
		break;
	case 3:
		index = CIF_REG_SCL_FRM1_ADDR_CH3;
		break;
	default:
		index = CIF_REG_SCL_FRM1_ADDR_CH0;
		break;
	}

	return index;
}

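/*
 * Program the initial ping-pong addresses: pop up to two buffers from the
 * pending list and write their Y-plane DMA addresses into the channel's
 * FRM0/FRM1 address registers before streaming starts.
 */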
static void rkcif_assign_scale_buffer_init(struct rkcif_scale_vdev *scale_vdev,
					   int ch)
{
	struct rkcif_device *dev = scale_vdev->stream->cifdev;
	u32 frm0_addr;
	u32 frm1_addr;
	unsigned long flags;

	frm0_addr = get_reg_index_of_scale_frm0_addr(ch);
	frm1_addr = get_reg_index_of_scale_frm1_addr(ch);

	spin_lock_irqsave(&scale_vdev->vbq_lock, flags);

	if (!scale_vdev->curr_buf) {
		if (!list_empty(&scale_vdev->buf_head)) {
			scale_vdev->curr_buf = list_first_entry(&scale_vdev->buf_head,
							    struct rkcif_buffer,
							    queue);
			list_del(&scale_vdev->curr_buf->queue);
		}
	}

	if (scale_vdev->curr_buf)
		rkcif_write_register(dev, frm0_addr,
				     scale_vdev->curr_buf->buff_addr[RKCIF_PLANE_Y]);

	if (!scale_vdev->next_buf) {
		if (!list_empty(&scale_vdev->buf_head)) {
			scale_vdev->next_buf = list_first_entry(&scale_vdev->buf_head,
							    struct rkcif_buffer, queue);
			list_del(&scale_vdev->next_buf->queue);
		}
	}

	if (scale_vdev->next_buf)
		rkcif_write_register(dev, frm1_addr,
				     scale_vdev->next_buf->buff_addr[RKCIF_PLANE_Y]);

	spin_unlock_irqrestore(&scale_vdev->vbq_lock, flags);
}

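/*
 * Refill the ping-pong slot that just completed: depending on which frame
 * (FRM0 or FRM1) is ready, take the next pending buffer, make it the new
 * current/next buffer and write its address back to the matching register.
 * Returns -EINVAL when no buffer is available so the caller drops the frame.
 */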
static int rkcif_assign_scale_buffer_update(struct rkcif_scale_vdev *scale_vdev,
					    int channel_id)
{
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct rkcif_buffer *buffer = NULL;
	u32 frm_addr;
	int ret = 0;
	unsigned long flags;

	frm_addr = scale_vdev->frame_phase & CIF_CSI_FRAME0_READY ?
		   get_reg_index_of_scale_frm0_addr(channel_id) :
		   get_reg_index_of_scale_frm1_addr(channel_id);

	spin_lock_irqsave(&scale_vdev->vbq_lock, flags);
	if (!list_empty(&scale_vdev->buf_head)) {
		if (scale_vdev->frame_phase == CIF_CSI_FRAME0_READY) {
			scale_vdev->curr_buf = list_first_entry(&scale_vdev->buf_head,
							    struct rkcif_buffer, queue);
			if (scale_vdev->curr_buf) {
				list_del(&scale_vdev->curr_buf->queue);
				buffer = scale_vdev->curr_buf;
			}
		} else if (scale_vdev->frame_phase == CIF_CSI_FRAME1_READY) {
			scale_vdev->next_buf = list_first_entry(&scale_vdev->buf_head,
							    struct rkcif_buffer, queue);
			if (scale_vdev->next_buf) {
				list_del(&scale_vdev->next_buf->queue);
				buffer = scale_vdev->next_buf;
			}
		}
	} else {
		buffer = NULL;
	}
	spin_unlock_irqrestore(&scale_vdev->vbq_lock, flags);

	if (buffer) {
		rkcif_write_register(dev, frm_addr,
				     buffer->buff_addr[RKCIF_PLANE_Y]);
	} else {
		ret = -EINVAL;
		v4l2_info(&dev->v4l2_dev,
			 "no active buffer, skip frame, scale ch[%d]\n",
			  scale_vdev->ch);
	}
	return ret;
}

static int rkcif_assign_scale_buffer_pingpong(struct rkcif_scale_vdev *scale_vdev,
					      int init, int channel_id)
{
	int ret = 0;

	if (init)
		rkcif_assign_scale_buffer_init(scale_vdev, channel_id);
	else
		ret = rkcif_assign_scale_buffer_update(scale_vdev, channel_id);
	return ret;
}

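/*
 * Configure one scale channel: bail out if the channel is already enabled,
 * program the initial ping-pong buffers, soft-reset the channel, clear and
 * unmask its interrupts, set the press/hurry/water-line control bits, the
 * black level and the virtual line width, then select the source channel
 * and scale mode and enable the channel.
 */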
static int rkcif_scale_channel_set(struct rkcif_scale_vdev *scale_vdev)
{
	struct rkcif_device *dev = scale_vdev->cifdev;
	u32 val = 0;
	u32 ch  = scale_vdev->ch;

	val = rkcif_read_register(dev, CIF_REG_SCL_CH_CTRL);
	if (val & CIF_SCALE_EN(ch)) {
		v4l2_err(&dev->v4l2_dev, "scale_vdev[%d] has been used by other device\n", ch);
		return -EINVAL;
	}

	rkcif_assign_scale_buffer_pingpong(scale_vdev,
					   RKCIF_YUV_ADDR_STATE_INIT,
					   ch);
	rkcif_write_register_or(dev, CIF_REG_SCL_CTRL, SCALE_SOFT_RESET(scale_vdev->ch));

	rkcif_write_register_and(dev, CIF_REG_GLB_INTST,
				 ~(SCALE_END_INTSTAT(ch) |
				 SCALE_FIFO_OVERFLOW(ch)));
	rkcif_write_register_or(dev, CIF_REG_GLB_INTEN,
				(SCALE_END_INTSTAT(ch) |
				SCALE_FIFO_OVERFLOW(ch) |
				SCALE_TOISP_AXI0_ERR |
				SCALE_TOISP_AXI1_ERR));
	val = CIF_SCALE_SW_PRESS_ENABLE |
	      CIF_SCALE_SW_PRESS_VALUE(7) |
	      CIF_SCALE_SW_HURRY_ENABLE |
	      CIF_SCALE_SW_HURRY_VALUE(7) |
	      CIF_SCALE_SW_WATER_LINE(1);

	rkcif_write_register(dev, CIF_REG_SCL_CTRL, val);
	val = scale_vdev->blc.pattern00 |
	      (scale_vdev->blc.pattern01 << 8) |
	      (scale_vdev->blc.pattern02 << 16) |
	      (scale_vdev->blc.pattern03 << 24);
	rkcif_write_register(dev, CIF_REG_SCL_BLC_CH0 + ch,
			     val);
	rkcif_write_register(dev, get_reg_index_of_scale_vlw(ch),
			     scale_vdev->ch_info.vir_width);
	val = CIF_SCALE_SW_SRC_CH(scale_vdev->ch_src, ch) |
	      CIF_SCALE_SW_MODE(scale_vdev->scale_mode, ch) |
	      CIF_SCALE_EN(ch);
	rkcif_write_register_or(dev, CIF_REG_SCL_CH_CTRL,
				val);
	return 0;
}


int rkcif_scale_start(struct rkcif_scale_vdev *scale_vdev)
{
	int ret = 0;
	struct rkcif_device *dev = scale_vdev->cifdev;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;

	mutex_lock(&dev->scale_lock);
	if (scale_vdev->state == RKCIF_STATE_STREAMING) {
		ret = -EBUSY;
		v4l2_err(v4l2_dev, "stream in busy state\n");
		goto destroy_buf;
	}

	rkcif_scale_channel_init(scale_vdev);
	ret = rkcif_scale_channel_set(scale_vdev);
	if (ret)
		goto destroy_buf;
	scale_vdev->frame_idx = 0;
	scale_vdev->state = RKCIF_STATE_STREAMING;
	mutex_unlock(&dev->scale_lock);
	return 0;

destroy_buf:
	if (scale_vdev->next_buf)
		vb2_buffer_done(&scale_vdev->next_buf->vb.vb2_buf,
				VB2_BUF_STATE_QUEUED);
	if (scale_vdev->curr_buf)
		vb2_buffer_done(&scale_vdev->curr_buf->vb.vb2_buf,
				VB2_BUF_STATE_QUEUED);
	while (!list_empty(&scale_vdev->buf_head)) {
		struct rkcif_buffer *buf;

		buf = list_first_entry(&scale_vdev->buf_head,
				       struct rkcif_buffer, queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	mutex_unlock(&dev->scale_lock);
	return ret;
}

static int
rkcif_scale_vb2_start_streaming(struct vb2_queue *queue,
				unsigned int count)
{
	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
	struct rkcif_stream *stream = scale_vdev->stream;
	int ret = 0;

	if (stream->state == RKCIF_STATE_STREAMING) {
		stream->to_en_scale = true;
	} else {
		ret = rkcif_scale_start(scale_vdev);
		if (ret)
			return ret;
	}

	rkcif_do_start_stream(stream, RKCIF_STREAM_MODE_TOSCALE);
	return 0;
}

static struct vb2_ops rkcif_scale_vb2_ops = {
	.queue_setup = rkcif_scale_vb2_queue_setup,
	.buf_queue = rkcif_scale_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = rkcif_scale_vb2_stop_streaming,
	.start_streaming = rkcif_scale_vb2_start_streaming,
};

static int rkcif_scale_init_vb2_queue(struct vb2_queue *q,
				      struct rkcif_scale_vdev *scale_vdev,
				      enum v4l2_buf_type buf_type)
{
	struct rkcif_hw *hw_dev = scale_vdev->cifdev->hw_dev;

	q->type = buf_type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = scale_vdev;
	q->ops = &rkcif_scale_vb2_ops;
	if (hw_dev->iommu_en)
		q->mem_ops = &vb2_dma_sg_memops;
	else
		q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct rkcif_buffer);
	q->min_buffers_needed = CIF_SCALE_REQ_BUFS_MIN;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &scale_vdev->vnode.vlock;
	q->dev = hw_dev->dev;
	q->allow_cache_hints = 1;
	q->bidirectional = 1;
	q->gfp_flags = GFP_DMA32;
	return vb2_queue_init(q);
}


static int rkcif_scale_g_ch(struct v4l2_device *v4l2_dev,
			    unsigned int intstat)
{
	if (intstat & SCALE_END_INTSTAT(0)) {
		if ((intstat & SCALE_END_INTSTAT(0)) ==
		    SCALE_END_INTSTAT(0))
			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH0\n");
		return RKCIF_SCALE_CH0;
	}

	if (intstat & SCALE_END_INTSTAT(1)) {
		if ((intstat & SCALE_END_INTSTAT(1)) ==
		    SCALE_END_INTSTAT(1))
			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH1\n");
		return RKCIF_SCALE_CH1;
	}

	if (intstat & SCALE_END_INTSTAT(2)) {
		if ((intstat & SCALE_END_INTSTAT(2)) ==
		    SCALE_END_INTSTAT(2))
			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH2\n");
		return RKCIF_SCALE_CH2;
	}

	if (intstat & SCALE_END_INTSTAT(3)) {
		if ((intstat & SCALE_END_INTSTAT(3)) ==
		    SCALE_END_INTSTAT(3))
			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH3\n");
		return RKCIF_SCALE_CH3;
	}

	return -EINVAL;
}

static void rkcif_scale_vb_done_oneframe(struct rkcif_scale_vdev *scale_vdev,
					 struct vb2_v4l2_buffer *vb_done)
{
	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;
	u32 i;

	/* Dequeue a filled buffer */
	for (i = 0; i < fmt->mplanes; i++) {
		vb2_set_plane_payload(&vb_done->vb2_buf, i,
				      scale_vdev->pixm.plane_fmt[i].sizeimage);
	}

	vb_done->vb2_buf.timestamp = ktime_get_ns();

	vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
}

static void rkcif_scale_update_stream(struct rkcif_scale_vdev *scale_vdev, int ch)
{
	struct rkcif_buffer *active_buf = NULL;
	struct vb2_v4l2_buffer *vb_done = NULL;
	int ret = 0;

	if (scale_vdev->frame_phase & CIF_CSI_FRAME0_READY) {
		if (scale_vdev->curr_buf)
			active_buf = scale_vdev->curr_buf;
	} else if (scale_vdev->frame_phase & CIF_CSI_FRAME1_READY) {
		if (scale_vdev->next_buf)
			active_buf = scale_vdev->next_buf;
	}

	ret = rkcif_assign_scale_buffer_pingpong(scale_vdev,
					 RKCIF_YUV_ADDR_STATE_UPDATE,
					 ch);

	if (active_buf && (!ret)) {
		vb_done = &active_buf->vb;
		vb_done->vb2_buf.timestamp = ktime_get_ns();
		vb_done->sequence = scale_vdev->frame_idx;
		rkcif_scale_vb_done_oneframe(scale_vdev, vb_done);
	}
	scale_vdev->frame_idx++;
}

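/*
 * Global interrupt handler for the scale channels: report FIFO overflows,
 * then for each channel with a pending SCALE_END interrupt hand the
 * finished frame to userspace, reprogram the ping-pong address, and handle
 * a deferred stop or a deferred DMA-capture enable on the parent stream.
 */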
void rkcif_irq_handle_scale(struct rkcif_device *cif_dev, unsigned int intstat_glb)
{
	struct rkcif_scale_vdev *scale_vdev;
	struct rkcif_stream *stream;
	int ch;
	int i = 0;
	u32 val = 0;

	val = SCALE_FIFO_OVERFLOW(0) |
	      SCALE_FIFO_OVERFLOW(1) |
	      SCALE_FIFO_OVERFLOW(2) |
	      SCALE_FIFO_OVERFLOW(3);
	if (intstat_glb & val) {
		v4l2_err(&cif_dev->v4l2_dev,
			"ERROR: scale channel, overflow intstat_glb:0x%x !!\n",
			intstat_glb);
		return;
	}

	ch = rkcif_scale_g_ch(&cif_dev->v4l2_dev,
				      intstat_glb);
	if (ch < 0)
		return;

	for (i = 0; i < RKCIF_MAX_STREAM_MIPI; i++) {
		ch = rkcif_scale_g_ch(&cif_dev->v4l2_dev,
				      intstat_glb);
		if (ch < 0)
			continue;

		scale_vdev = &cif_dev->scale_vdev[ch];

		if (scale_vdev->state != RKCIF_STATE_STREAMING)
			continue;

		if (scale_vdev->stopping) {
			rkcif_scale_stop(scale_vdev);
			scale_vdev->stopping = false;
			wake_up(&scale_vdev->wq_stopped);
			continue;
		}

		scale_vdev->frame_phase = SW_SCALE_END(intstat_glb, ch);
		intstat_glb &= ~(SCALE_END_INTSTAT(ch));
		rkcif_scale_update_stream(scale_vdev, ch);
		stream = scale_vdev->stream;
		if (stream->to_en_dma)
			rkcif_enable_dma_capture(stream);
	}
}

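/*
 * Initialize the per-channel scale vdev state with defaults (SBGGR16 at the
 * default resolution), set up its buffer list, lock and wait queue, and
 * apply the initial output format.
 */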
void rkcif_init_scale_vdev(struct rkcif_device *cif_dev, u32 ch)
{
	struct rkcif_scale_vdev *scale_vdev = &cif_dev->scale_vdev[ch];
	struct rkcif_stream *stream = &cif_dev->stream[ch];
	struct v4l2_pix_format_mplane pixm;

	memset(scale_vdev, 0, sizeof(*scale_vdev));
	memset(&pixm, 0, sizeof(pixm));
	scale_vdev->cifdev = cif_dev;
	scale_vdev->stream = stream;
	stream->scale_vdev = scale_vdev;
	scale_vdev->ch = ch;
	scale_vdev->ch_src = 0;
	scale_vdev->frame_idx = 0;
	pixm.pixelformat = V4L2_PIX_FMT_SBGGR16;
	pixm.width = RKCIF_DEFAULT_WIDTH;
	pixm.height = RKCIF_DEFAULT_HEIGHT;
	scale_vdev->state = RKCIF_STATE_READY;
	scale_vdev->stopping = false;
	scale_vdev->blc.pattern00 = 0;
	scale_vdev->blc.pattern01 = 0;
	scale_vdev->blc.pattern02 = 0;
	scale_vdev->blc.pattern03 = 0;
	INIT_LIST_HEAD(&scale_vdev->buf_head);
	spin_lock_init(&scale_vdev->vbq_lock);
	init_waitqueue_head(&scale_vdev->wq_stopped);
	rkcif_scale_set_fmt(scale_vdev, &pixm, false);
}

static int rkcif_register_scale_vdev(struct rkcif_scale_vdev *scale_vdev, bool is_multi_input)
{
	int ret = 0;
	struct video_device *vdev = &scale_vdev->vnode.vdev;
	struct rkcif_vdev_node *node;
	char *vdev_name;

	switch (scale_vdev->ch) {
	case RKCIF_SCALE_CH0:
		vdev_name = CIF_SCALE_CH0_VDEV_NAME;
		break;
	case RKCIF_SCALE_CH1:
		vdev_name = CIF_SCALE_CH1_VDEV_NAME;
		break;
	case RKCIF_SCALE_CH2:
		vdev_name = CIF_SCALE_CH2_VDEV_NAME;
		break;
	case RKCIF_SCALE_CH3:
		vdev_name = CIF_SCALE_CH3_VDEV_NAME;
		break;
	default:
		ret = -EINVAL;
		v4l2_err(&scale_vdev->cifdev->v4l2_dev, "Invalid stream\n");
		goto err_cleanup_media_entity;
	}

	strscpy(vdev->name, vdev_name, sizeof(vdev->name));
	node = container_of(vdev, struct rkcif_vdev_node, vdev);
	mutex_init(&node->vlock);

	vdev->ioctl_ops = &rkcif_scale_ioctl;
	vdev->fops = &rkcif_scale_fops;
	vdev->release = video_device_release_empty;
	vdev->lock = &node->vlock;
	vdev->v4l2_dev = &scale_vdev->cifdev->v4l2_dev;
	vdev->queue = &node->buf_queue;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
			    V4L2_CAP_STREAMING;
	vdev->vfl_dir =  VFL_DIR_RX;
	node->pad.flags = MEDIA_PAD_FL_SINK;
	video_set_drvdata(vdev, scale_vdev);

	rkcif_scale_init_vb2_queue(&node->buf_queue,
				   scale_vdev,
				   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	vdev->queue = &node->buf_queue;

	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
	if (ret < 0)
		goto err_release_queue;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(&vdev->dev,
			"could not register Video for Linux device\n");
		goto err_cleanup_media_entity;
	}
	return 0;

err_cleanup_media_entity:
	media_entity_cleanup(&vdev->entity);
err_release_queue:
	vb2_queue_release(vdev->queue);
	return ret;
}

static void rkcif_unregister_scale_vdev(struct rkcif_scale_vdev *scale_vdev)
{
	struct rkcif_vdev_node *node = &scale_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	video_unregister_device(vdev);
	media_entity_cleanup(&vdev->entity);
	vb2_queue_release(vdev->queue);
}

int rkcif_register_scale_vdevs(struct rkcif_device *cif_dev,
			       int stream_num,
			       bool is_multi_input)
{
	struct rkcif_scale_vdev *scale_vdev;
	int i, j, ret;

	for (i = 0; i < stream_num; i++) {
		scale_vdev = &cif_dev->scale_vdev[i];
		ret = rkcif_register_scale_vdev(scale_vdev, is_multi_input);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		scale_vdev = &cif_dev->scale_vdev[j];
		rkcif_unregister_scale_vdev(scale_vdev);
	}

	return ret;
}

void rkcif_unregister_scale_vdevs(struct rkcif_device *cif_dev,
				  int stream_num)
{
	struct rkcif_scale_vdev *scale_vdev;
	int i;

	for (i = 0; i < stream_num; i++) {
		scale_vdev = &cif_dev->scale_vdev[i];
		rkcif_unregister_scale_vdev(scale_vdev);
	}
}