1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI VPFE capture Driver
4  *
5  * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
6  *
7  * Benoit Parrot <bparrot@ti.com>
8  * Lad, Prabhakar <prabhakar.csengg@gmail.com>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/of_graph.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/videodev2.h>
24 
25 #include <media/v4l2-common.h>
26 #include <media/v4l2-ctrls.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-fwnode.h>
29 #include <media/v4l2-rect.h>
30 
31 #include "am437x-vpfe.h"
32 
33 #define VPFE_MODULE_NAME	"vpfe"
34 #define VPFE_VERSION		"0.1.0"
35 
36 static int debug;
37 module_param(debug, int, 0644);
38 MODULE_PARM_DESC(debug, "Debug level 0-8");
39 
40 #define vpfe_dbg(level, dev, fmt, arg...)	\
41 		v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
42 #define vpfe_info(dev, fmt, arg...)	\
43 		v4l2_info(&dev->v4l2_dev, fmt, ##arg)
44 #define vpfe_err(dev, fmt, arg...)	\
45 		v4l2_err(&dev->v4l2_dev, fmt, ##arg)
46 
47 /* standard information */
48 struct vpfe_standard {
49 	v4l2_std_id std_id;
50 	unsigned int width;
51 	unsigned int height;
52 	struct v4l2_fract pixelaspect;
53 	int frame_format;
54 };
55 
56 static const struct vpfe_standard vpfe_standards[] = {
57 	{V4L2_STD_525_60, 720, 480, {11, 10}, 1},
58 	{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
59 };
60 
61 static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
62 	{
63 		.fourcc		= V4L2_PIX_FMT_YUYV,
64 		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
65 		.bitsperpixel	= 16,
66 	}, {
67 		.fourcc		= V4L2_PIX_FMT_UYVY,
68 		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
69 		.bitsperpixel	= 16,
70 	}, {
71 		.fourcc		= V4L2_PIX_FMT_YVYU,
72 		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
73 		.bitsperpixel	= 16,
74 	}, {
75 		.fourcc		= V4L2_PIX_FMT_VYUY,
76 		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
77 		.bitsperpixel	= 16,
78 	}, {
79 		.fourcc		= V4L2_PIX_FMT_SBGGR8,
80 		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
81 		.bitsperpixel	= 8,
82 	}, {
83 		.fourcc		= V4L2_PIX_FMT_SGBRG8,
84 		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
85 		.bitsperpixel	= 8,
86 	}, {
87 		.fourcc		= V4L2_PIX_FMT_SGRBG8,
88 		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
89 		.bitsperpixel	= 8,
90 	}, {
91 		.fourcc		= V4L2_PIX_FMT_SRGGB8,
92 		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
93 		.bitsperpixel	= 8,
94 	}, {
95 		.fourcc		= V4L2_PIX_FMT_RGB565,
96 		.code		= MEDIA_BUS_FMT_RGB565_2X8_LE,
97 		.bitsperpixel	= 16,
98 	}, {
99 		.fourcc		= V4L2_PIX_FMT_RGB565X,
100 		.code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
101 		.bitsperpixel	= 16,
102 	},
103 };
104 
105 static int __subdev_get_format(struct vpfe_device *vpfe,
106 			       struct v4l2_mbus_framefmt *fmt);
107 static int vpfe_calc_format_size(struct vpfe_device *vpfe,
108 				 const struct vpfe_fmt *fmt,
109 				 struct v4l2_format *f);
110 
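/* Find the active format entry matching a given media bus code */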
111 static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
112 					    unsigned int code)
113 {
114 	struct vpfe_fmt *fmt;
115 	unsigned int k;
116 
117 	for (k = 0; k < vpfe->num_active_fmt; k++) {
118 		fmt = vpfe->active_fmt[k];
119 		if (fmt->code == code)
120 			return fmt;
121 	}
122 
123 	return NULL;
124 }
125 
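/* Find the active format entry matching a given V4L2 fourcc */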
126 static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
127 					   unsigned int pixelformat)
128 {
129 	struct vpfe_fmt *fmt;
130 	unsigned int k;
131 
132 	for (k = 0; k < vpfe->num_active_fmt; k++) {
133 		fmt = vpfe->active_fmt[k];
134 		if (fmt->fourcc == pixelformat)
135 			return fmt;
136 	}
137 
138 	return NULL;
139 }
140 
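/*
 * Compute the bytes of memory consumed per pixel: the number of bus
 * clocks needed per pixel times the bus width rounded up to whole bytes.
 */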
141 static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
142 					const struct vpfe_fmt *fmt)
143 {
144 	struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
145 	unsigned int bus_width = sdinfo->vpfe_param.bus_width;
146 	u32 bpp, bus_width_bytes, clocksperpixel;
147 
148 	bus_width_bytes = ALIGN(bus_width, 8) >> 3;
149 	clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
150 	bpp = clocksperpixel * bus_width_bytes;
151 
152 	return bpp;
153 }
154 
155 /*  Print Four-character-code (FOURCC) */
156 static char *print_fourcc(u32 fmt)
157 {
158 	static char code[5];
159 
160 	code[0] = (unsigned char)(fmt & 0xff);
161 	code[1] = (unsigned char)((fmt >> 8) & 0xff);
162 	code[2] = (unsigned char)((fmt >> 16) & 0xff);
163 	code[3] = (unsigned char)((fmt >> 24) & 0xff);
164 	code[4] = '\0';
165 
166 	return code;
167 }
168 
169 static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
170 {
171 	return ioread32(ccdc->ccdc_cfg.base_addr + offset);
172 }
173 
174 static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
175 {
176 	iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
177 }
178 
179 static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
180 {
181 	return container_of(ccdc, struct vpfe_device, ccdc);
182 }
183 
184 static inline
185 struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
186 {
187 	return container_of(vb, struct vpfe_cap_buffer, vb);
188 }
189 
190 static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
191 {
192 	vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
193 }
194 
195 static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
196 {
197 	unsigned int cfg;
198 
199 	if (!flag) {
200 		cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
201 		cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
202 	} else {
203 		cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
204 	}
205 
206 	vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
207 }
208 
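/*
 * Program the CCDC capture window: horizontal start/width, vertical
 * start/line count and the VDINT0/VDINT1 interrupt line positions.
 */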
209 static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
210 			     struct v4l2_rect *image_win,
211 			     enum ccdc_frmfmt frm_fmt,
212 			     int bpp)
213 {
214 	int horz_start, horz_nr_pixels;
215 	int vert_start, vert_nr_lines;
216 	int val, mid_img;
217 
218 	/*
219 	 * ppc - per pixel count. Indicates how many pixels per cell are
220 	 * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2.
221 	 * For raw capture this is 1.
222 	 */
223 	horz_start = image_win->left * bpp;
224 	horz_nr_pixels = (image_win->width * bpp) - 1;
225 	vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
226 				horz_nr_pixels, VPFE_HORZ_INFO);
227 
228 	vert_start = image_win->top;
229 
230 	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
231 		vert_nr_lines = (image_win->height >> 1) - 1;
232 		vert_start >>= 1;
233 		/* configure VDINT0 */
234 		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
235 	} else {
236 		vert_nr_lines = image_win->height - 1;
237 		/*
238 		 * configure VDINT0 and VDINT1. VDINT1 will be at half
239 		 * of image height
240 		 */
241 		mid_img = vert_start + (image_win->height / 2);
242 		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
243 				(mid_img & VPFE_VDINT_VDINT1_MASK);
244 	}
245 
246 	vpfe_reg_write(ccdc, val, VPFE_VDINT);
247 
248 	vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
249 				vert_start, VPFE_VERT_START);
250 	vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
251 }
252 
253 static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
254 {
255 	struct vpfe_device *vpfe = to_vpfe(ccdc);
256 
257 	vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
258 	vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
259 	vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
260 	vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
261 	vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
262 	vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
263 	vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
264 		 vpfe_reg_read(ccdc, VPFE_SYNMODE));
265 	vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
266 		 vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
267 	vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
268 		 vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
269 	vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
270 		 vpfe_reg_read(ccdc, VPFE_VERT_START));
271 	vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
272 		 vpfe_reg_read(ccdc, VPFE_VERT_LINES));
273 }
274 
275 static int
276 vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
277 			 struct vpfe_ccdc_config_params_raw *ccdcparam)
278 {
279 	struct vpfe_device *vpfe = to_vpfe(ccdc);
280 	u8 max_gamma, max_data;
281 
282 	if (!ccdcparam->alaw.enable)
283 		return 0;
284 
285 	max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
286 	max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
287 
288 	if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
289 	    ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS ||
290 	    max_gamma > max_data) {
291 		vpfe_dbg(1, vpfe, "Invalid data line select\n");
292 		return -EINVAL;
293 	}
294 
295 	return 0;
296 }
297 
298 static void
299 vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
300 			    struct vpfe_ccdc_config_params_raw *raw_params)
301 {
302 	struct vpfe_ccdc_config_params_raw *config_params =
303 				&ccdc->ccdc_cfg.bayer.config_params;
304 
305 	*config_params = *raw_params;
306 }
307 
308 /*
309  * vpfe_ccdc_restore_defaults()
310  * This function will write defaults to all CCDC registers
311  */
312 static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
313 {
314 	int i;
315 
316 	/* Disable CCDC */
317 	vpfe_pcr_enable(ccdc, 0);
318 
319 	/* set all registers to default value */
320 	for (i = 4; i <= 0x94; i += 4)
321 		vpfe_reg_write(ccdc, 0,  i);
322 
323 	vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
324 	vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
325 }
326 
327 static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
328 {
329 	struct vpfe_device *vpfe = to_vpfe(ccdc);
330 	u32 dma_cntl, pcr;
331 
332 	pcr = vpfe_reg_read(ccdc, VPFE_PCR);
333 	if (pcr)
334 		vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr);
335 
336 	dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
337 	if ((dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
338 		vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)",
339 			 dma_cntl);
340 
341 	/* Disable CCDC by resetting all registers to default POR values */
342 	vpfe_ccdc_restore_defaults(ccdc);
343 
344 	/* Disable the module at the CONFIG level */
345 	vpfe_config_enable(ccdc, 0);
346 
347 	pm_runtime_put_sync(dev);
348 	return 0;
349 }
350 
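/*
 * Copy raw bayer CCDC configuration from user space, validate it and
 * store it for use when raw capture is configured.
 */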
351 static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
352 {
353 	struct vpfe_device *vpfe = to_vpfe(ccdc);
354 	struct vpfe_ccdc_config_params_raw raw_params;
355 	int x;
356 
357 	if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
358 		return -EINVAL;
359 
360 	x = copy_from_user(&raw_params, params, sizeof(raw_params));
361 	if (x) {
362 		vpfe_dbg(1, vpfe,
363 			 "%s: error in copying ccdc params, %d\n",
364 			 __func__, x);
365 		return -EFAULT;
366 	}
367 
368 	if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
369 		vpfe_ccdc_update_raw_params(ccdc, &raw_params);
370 		return 0;
371 	}
372 
373 	return -EINVAL;
374 }
375 
376 /*
377  * vpfe_ccdc_config_ycbcr()
378  * This function will configure CCDC for YCbCr video capture
379  */
380 static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
381 {
382 	struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
383 	u32 syn_mode;
384 
385 	/*
386 	 * first restore the CCDC registers to default values
387 	 * This is important since we assume default values to be set in
388 	 * a lot of registers that we didn't touch
389 	 */
390 	vpfe_ccdc_restore_defaults(ccdc);
391 
392 	/*
393 	 * configure pixel format, frame format, configure video frame
394 	 * format, enable output to SDRAM, enable internal timing generator
395 	 * and 8bit pack mode
396 	 */
397 	syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
398 		    VPFE_SYN_MODE_INPMOD_SHIFT) |
399 		    ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
400 		    VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
401 		    VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
402 
403 	/* setup BT.656 sync mode */
404 	if (params->bt656_enable) {
405 		vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
406 
407 		/*
408 		 * configure the FID, VD, HD pin polarity,
409 		 * fld,hd pol positive, vd negative, 8-bit data
410 		 */
411 		syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
412 		if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
413 			syn_mode |= VPFE_SYN_MODE_10BITS;
414 		else
415 			syn_mode |= VPFE_SYN_MODE_8BITS;
416 	} else {
417 		/* y/c external sync mode */
418 		syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
419 			     VPFE_FID_POL_SHIFT) |
420 			     ((params->hd_pol & VPFE_HD_POL_MASK) <<
421 			     VPFE_HD_POL_SHIFT) |
422 			     ((params->vd_pol & VPFE_VD_POL_MASK) <<
423 			     VPFE_VD_POL_SHIFT));
424 	}
425 	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
426 
427 	/* configure video window */
428 	vpfe_ccdc_setwin(ccdc, &params->win,
429 			 params->frm_fmt, params->bytesperpixel);
430 
431 	/*
432 	 * configure the order of y cb cr in SDRAM, and disable latch
433 	 * internal register on vsync
434 	 */
435 	if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
436 		vpfe_reg_write(ccdc,
437 			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
438 			       VPFE_LATCH_ON_VSYNC_DISABLE |
439 			       VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
440 	else
441 		vpfe_reg_write(ccdc,
442 			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
443 			       VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
444 
445 	/*
446 	 * configure the horizontal line offset. This should be on
447 	 * a 32 byte boundary. So clear the 5 LSBs
448 	 */
449 	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
450 
451 	/* configure the memory line offset */
452 	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
453 		/* two fields are interleaved in memory */
454 		vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
455 			       VPFE_SDOFST);
456 }
457 
458 static void
459 vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
460 			     struct vpfe_ccdc_black_clamp *bclamp)
461 {
462 	u32 val;
463 
464 	if (!bclamp->enable) {
465 		/* configure DCSub */
466 		val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
467 		vpfe_reg_write(ccdc, val, VPFE_DCSUB);
468 		vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
469 		return;
470 	}
471 	/*
472 	 * Configure gain, start pixel, number of lines to average,
473 	 * number of pixels/line to average, and enable black clamping
474 	 */
475 	val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
476 	       ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
477 		VPFE_BLK_ST_PXL_SHIFT) |
478 	       ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
479 		VPFE_BLK_SAMPLE_LINE_SHIFT) |
480 	       ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
481 		VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
482 	vpfe_reg_write(ccdc, val, VPFE_CLAMP);
483 	/* If black clamping is enabled then make dcsub 0 */
484 	vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
485 }
486 
487 static void
488 vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
489 				struct vpfe_ccdc_black_compensation *bcomp)
490 {
491 	u32 val;
492 
493 	val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
494 	      ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
495 	       VPFE_BLK_COMP_GB_COMP_SHIFT) |
496 	      ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
497 	       VPFE_BLK_COMP_GR_COMP_SHIFT) |
498 	      ((bcomp->r & VPFE_BLK_COMP_MASK) <<
499 	       VPFE_BLK_COMP_R_COMP_SHIFT));
500 	vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
501 }
502 
503 /*
504  * vpfe_ccdc_config_raw()
505  * This function will configure CCDC for Raw capture mode
506  */
507 static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
508 {
509 	struct vpfe_device *vpfe = to_vpfe(ccdc);
510 	struct vpfe_ccdc_config_params_raw *config_params =
511 				&ccdc->ccdc_cfg.bayer.config_params;
512 	struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
513 	unsigned int syn_mode;
514 	unsigned int val;
515 
516 	/* Reset CCDC */
517 	vpfe_ccdc_restore_defaults(ccdc);
518 
519 	/* Disable latching function registers on VSYNC  */
520 	vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
521 
522 	/*
523 	 * Configure the vertical sync polarity(SYN_MODE.VDPOL),
524 	 * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
525 	 * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
526 	 * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
527 	 * SDRAM, enable internal timing generator
528 	 */
529 	syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
530 		   ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
531 		   ((params->fid_pol & VPFE_FID_POL_MASK) <<
532 		   VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
533 		   VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
534 		   ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
535 		   VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
536 		   VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
537 		   VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
538 
539 	/* Enable and configure aLaw register if needed */
540 	if (config_params->alaw.enable) {
541 		val = ((config_params->alaw.gamma_wd &
542 		      VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
543 		vpfe_reg_write(ccdc, val, VPFE_ALAW);
544 		vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
545 	}
546 
547 	/* Configure video window */
548 	vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
549 			 params->bytesperpixel);
550 
551 	/* Configure Black Clamp */
552 	vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
553 
554 	/* Configure Black level compensation */
555 	vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
556 
557 	/* If data size is 8 bit then pack the data */
558 	if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
559 	    config_params->alaw.enable)
560 		syn_mode |= VPFE_DATA_PACK_ENABLE;
561 
562 	/*
563 	 * Configure Horizontal offset register. If pack 8 is enabled then
564 	 * 1 pixel will take 1 byte
565 	 */
566 	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
567 
568 	vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
569 		params->bytesperline, params->bytesperline);
570 
571 	/* Set value for SDOFST */
572 	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
573 		if (params->image_invert_enable) {
574 			/* For interlace inverse mode */
575 			vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
576 				   VPFE_SDOFST);
577 		} else {
578 			/* For interlace non inverse mode */
579 			vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
580 				   VPFE_SDOFST);
581 		}
582 	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
583 		vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
584 			   VPFE_SDOFST);
585 	}
586 
587 	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
588 
589 	vpfe_reg_dump(ccdc);
590 }
591 
592 static inline int
593 vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
594 		      enum ccdc_buftype buf_type)
595 {
596 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
597 		ccdc->ccdc_cfg.bayer.buf_type = buf_type;
598 	else
599 		ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
600 
601 	return 0;
602 }
603 
604 static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
605 {
606 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
607 		return ccdc->ccdc_cfg.bayer.buf_type;
608 
609 	return ccdc->ccdc_cfg.ycbcr.buf_type;
610 }
611 
612 static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
613 {
614 	struct vpfe_device *vpfe = to_vpfe(ccdc);
615 
616 	vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
617 		 __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
618 
619 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
620 		ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
621 		/*
622 		 * Need to clear it in case it was left on
623 		 * after the last capture.
624 		 */
625 		ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
626 
627 		switch (pixfmt) {
628 		case V4L2_PIX_FMT_SBGGR8:
629 			ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
630 			break;
631 
632 		case V4L2_PIX_FMT_YUYV:
633 		case V4L2_PIX_FMT_UYVY:
634 		case V4L2_PIX_FMT_YUV420:
635 		case V4L2_PIX_FMT_NV12:
636 		case V4L2_PIX_FMT_RGB565X:
637 			break;
638 
639 		case V4L2_PIX_FMT_SBGGR16:
640 		default:
641 			return -EINVAL;
642 		}
643 	} else {
644 		switch (pixfmt) {
645 		case V4L2_PIX_FMT_YUYV:
646 			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
647 			break;
648 
649 		case V4L2_PIX_FMT_UYVY:
650 			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
651 			break;
652 
653 		default:
654 			return -EINVAL;
655 		}
656 	}
657 
658 	return 0;
659 }
660 
661 static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
662 {
663 	u32 pixfmt;
664 
665 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
666 		pixfmt = V4L2_PIX_FMT_YUYV;
667 	} else {
668 		if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
669 			pixfmt = V4L2_PIX_FMT_YUYV;
670 		else
671 			pixfmt = V4L2_PIX_FMT_UYVY;
672 	}
673 
674 	return pixfmt;
675 }
676 
677 static int
678 vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
679 			   struct v4l2_rect *win, unsigned int bpp)
680 {
681 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
682 		ccdc->ccdc_cfg.bayer.win = *win;
683 		ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
684 		ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
685 	} else {
686 		ccdc->ccdc_cfg.ycbcr.win = *win;
687 		ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
688 		ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
689 	}
690 
691 	return 0;
692 }
693 
694 static inline void
695 vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
696 			   struct v4l2_rect *win)
697 {
698 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
699 		*win = ccdc->ccdc_cfg.bayer.win;
700 	else
701 		*win = ccdc->ccdc_cfg.ycbcr.win;
702 }
703 
704 static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
705 {
706 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
707 		return ccdc->ccdc_cfg.bayer.bytesperline;
708 
709 	return ccdc->ccdc_cfg.ycbcr.bytesperline;
710 }
711 
712 static inline int
713 vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
714 			   enum ccdc_frmfmt frm_fmt)
715 {
716 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
717 		ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
718 	else
719 		ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
720 
721 	return 0;
722 }
723 
724 static inline enum ccdc_frmfmt
725 vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
726 {
727 	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
728 		return ccdc->ccdc_cfg.bayer.frm_fmt;
729 
730 	return ccdc->ccdc_cfg.ycbcr.frm_fmt;
731 }
732 
733 static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
734 {
735 	return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
736 }
737 
738 static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
739 {
740 	vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
741 }
742 
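/*
 * Store the sub device interface parameters (sync polarities, data
 * width) in the CCDC configuration.
 */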
743 static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
744 				      struct vpfe_hw_if_param *params)
745 {
746 	struct vpfe_device *vpfe = to_vpfe(ccdc);
747 
748 	ccdc->ccdc_cfg.if_type = params->if_type;
749 
750 	switch (params->if_type) {
751 	case VPFE_BT656:
752 	case VPFE_YCBCR_SYNC_16:
753 	case VPFE_YCBCR_SYNC_8:
754 	case VPFE_BT656_10BIT:
755 		ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
756 		ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
757 		break;
758 
759 	case VPFE_RAW_BAYER:
760 		ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
761 		ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
762 		if (params->bus_width == 10)
763 			ccdc->ccdc_cfg.bayer.config_params.data_sz =
764 				VPFE_CCDC_DATA_10BITS;
765 		else
766 			ccdc->ccdc_cfg.bayer.config_params.data_sz =
767 				VPFE_CCDC_DATA_8BITS;
768 		vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
769 			params->bus_width);
770 		vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
771 			ccdc->ccdc_cfg.bayer.config_params.data_sz);
772 		break;
773 
774 	default:
775 		return -EINVAL;
776 	}
777 
778 	return 0;
779 }
780 
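/*
 * Clear the given VDINT interrupt(s) in the status register (or all of
 * them) and signal end-of-interrupt.
 */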
781 static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
782 {
783 	unsigned int vpfe_int_status;
784 
785 	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
786 
787 	switch (vdint) {
788 	/* VD0 interrupt */
789 	case VPFE_VDINT0:
790 		vpfe_int_status &= ~VPFE_VDINT0;
791 		vpfe_int_status |= VPFE_VDINT0;
792 		break;
793 
794 	/* VD1 interrupt */
795 	case VPFE_VDINT1:
796 		vpfe_int_status &= ~VPFE_VDINT1;
797 		vpfe_int_status |= VPFE_VDINT1;
798 		break;
799 
800 	/* VD2 interrupt */
801 	case VPFE_VDINT2:
802 		vpfe_int_status &= ~VPFE_VDINT2;
803 		vpfe_int_status |= VPFE_VDINT2;
804 		break;
805 
806 	/* Clear all interrupts */
807 	default:
808 		vpfe_int_status &= ~(VPFE_VDINT0 |
809 				VPFE_VDINT1 |
810 				VPFE_VDINT2);
811 		vpfe_int_status |= (VPFE_VDINT0 |
812 				VPFE_VDINT1 |
813 				VPFE_VDINT2);
814 		break;
815 	}
816 	/* Clear specific VDINT from the status register */
817 	vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
818 
819 	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
820 
821 	/* Acknowledge that we are done with all interrupts */
822 	vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
823 }
824 
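/*
 * Set up default capture parameters: BT.656 YCbCr at 720x576 and
 * progressive raw bayer at 800x600.
 */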
825 static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
826 {
827 	ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
828 
829 	ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
830 	ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
831 	ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
832 	ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
833 	ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
834 	ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
835 	ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
836 
837 	ccdc->ccdc_cfg.ycbcr.win.left = 0;
838 	ccdc->ccdc_cfg.ycbcr.win.top = 0;
839 	ccdc->ccdc_cfg.ycbcr.win.width = 720;
840 	ccdc->ccdc_cfg.ycbcr.win.height = 576;
841 	ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
842 
843 	ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
844 	ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
845 	ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
846 	ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
847 	ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
848 
849 	ccdc->ccdc_cfg.bayer.win.left = 0;
850 	ccdc->ccdc_cfg.bayer.win.top = 0;
851 	ccdc->ccdc_cfg.bayer.win.width = 800;
852 	ccdc->ccdc_cfg.bayer.win.height = 600;
853 	ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
854 	ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
855 						VPFE_CCDC_GAMMA_BITS_09_0;
856 }
857 
858 /*
859  * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
860  */
861 static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
862 				      struct v4l2_format *f)
863 {
864 	struct v4l2_rect image_win;
865 	enum ccdc_buftype buf_type;
866 	enum ccdc_frmfmt frm_fmt;
867 
868 	memset(f, 0, sizeof(*f));
869 	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
870 	vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
871 	f->fmt.pix.width = image_win.width;
872 	f->fmt.pix.height = image_win.height;
873 	f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
874 	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
875 				f->fmt.pix.height;
876 	buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
877 	f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
878 	frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
879 
880 	if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
881 		f->fmt.pix.field = V4L2_FIELD_NONE;
882 	} else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
883 		if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
884 			f->fmt.pix.field = V4L2_FIELD_INTERLACED;
885 		} else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
886 			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
887 		} else {
888 			vpfe_err(vpfe, "Invalid buf_type\n");
889 			return -EINVAL;
890 		}
891 	} else {
892 		vpfe_err(vpfe, "Invalid frm_fmt\n");
893 		return -EINVAL;
894 	}
895 	return 0;
896 }
897 
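/*
 * Apply the current pixel format, crop window, buffer type and frame
 * format to the CCDC.
 */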
898 static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
899 {
900 	enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
901 	u32 bpp;
902 	int ret = 0;
903 
904 	vpfe_dbg(1, vpfe, "pixelformat: %s\n",
905 		print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
906 
907 	if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
908 			vpfe->fmt.fmt.pix.pixelformat) < 0) {
909 		vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
910 		return -EINVAL;
911 	}
912 
913 	/* configure the image window */
914 	bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
915 	vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
916 
917 	switch (vpfe->fmt.fmt.pix.field) {
918 	case V4L2_FIELD_INTERLACED:
919 		/* do nothing, since it is default */
920 		ret = vpfe_ccdc_set_buftype(
921 				&vpfe->ccdc,
922 				CCDC_BUFTYPE_FLD_INTERLEAVED);
923 		break;
924 
925 	case V4L2_FIELD_NONE:
926 		frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
927 		/* buffer type only applicable for interlaced scan */
928 		break;
929 
930 	case V4L2_FIELD_SEQ_TB:
931 		ret = vpfe_ccdc_set_buftype(
932 				&vpfe->ccdc,
933 				CCDC_BUFTYPE_FLD_SEPARATED);
934 		break;
935 
936 	default:
937 		return -EINVAL;
938 	}
939 
940 	if (ret)
941 		return ret;
942 
943 	return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
944 }
945 
946 /*
947  * vpfe_config_image_format()
948  * For a given standard, this function sets up the default
949  * pix format & crop values in the vpfe device and ccdc. It first
950  * starts with default values from the standard table.
951  * It then checks if the sub device supports get_fmt and overrides the
952  * values based on that. Sets crop values to match the scan resolution
953  * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
954  * values in ccdc
955  */
956 static int vpfe_config_image_format(struct vpfe_device *vpfe,
957 				    v4l2_std_id std_id)
958 {
959 	struct vpfe_fmt *fmt;
960 	struct v4l2_mbus_framefmt mbus_fmt;
961 	int i, ret;
962 
963 	for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
964 		if (vpfe_standards[i].std_id & std_id) {
965 			vpfe->std_info.active_pixels =
966 					vpfe_standards[i].width;
967 			vpfe->std_info.active_lines =
968 					vpfe_standards[i].height;
969 			vpfe->std_info.frame_format =
970 					vpfe_standards[i].frame_format;
971 			vpfe->std_index = i;
972 
973 			break;
974 		}
975 	}
976 
977 	if (i ==  ARRAY_SIZE(vpfe_standards)) {
978 		vpfe_err(vpfe, "standard not supported\n");
979 		return -EINVAL;
980 	}
981 
982 	ret = __subdev_get_format(vpfe, &mbus_fmt);
983 	if (ret)
984 		return ret;
985 
986 	fmt = find_format_by_code(vpfe, mbus_fmt.code);
987 	if (!fmt) {
988 		vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
989 			 mbus_fmt.code);
990 		return -EINVAL;
991 	}
992 
993 	/* Save current subdev format */
994 	v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
995 	vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
996 	vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
997 	vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
998 	vpfe->current_vpfe_fmt = fmt;
999 
1000 	/* Update the crop window based on found values */
1001 	vpfe->crop.top = 0;
1002 	vpfe->crop.left = 0;
1003 	vpfe->crop.width = mbus_fmt.width;
1004 	vpfe->crop.height = mbus_fmt.height;
1005 
1006 	return vpfe_config_ccdc_image_format(vpfe);
1007 }
1008 
1009 static int vpfe_initialize_device(struct vpfe_device *vpfe)
1010 {
1011 	struct vpfe_subdev_info *sdinfo;
1012 	int ret;
1013 
1014 	sdinfo = &vpfe->cfg->sub_devs[0];
1015 	sdinfo->sd = vpfe->sd[0];
1016 	vpfe->current_input = 0;
1017 	vpfe->std_index = 0;
1018 	/* Configure the default format information */
1019 	ret = vpfe_config_image_format(vpfe,
1020 				       vpfe_standards[vpfe->std_index].std_id);
1021 	if (ret)
1022 		return ret;
1023 
1024 	ret = pm_runtime_resume_and_get(vpfe->pdev);
1025 	if (ret < 0)
1026 		return ret;
1027 
1028 	vpfe_config_enable(&vpfe->ccdc, 1);
1029 
1030 	vpfe_ccdc_restore_defaults(&vpfe->ccdc);
1031 
1032 	/* Clear all VPFE interrupts */
1033 	vpfe_clear_intr(&vpfe->ccdc, -1);
1034 
1035 	return ret;
1036 }
1037 
1038 /*
1039  * vpfe_release : This function is based on the vb2_fop_release
1040  * helper function.
1041  * It has been augmented to handle module power management,
1042  * by disabling/enabling h/w module fcntl clock when necessary.
1043  */
1044 static int vpfe_release(struct file *file)
1045 {
1046 	struct vpfe_device *vpfe = video_drvdata(file);
1047 	bool fh_singular;
1048 	int ret;
1049 
1050 	mutex_lock(&vpfe->lock);
1051 
1052 	/* Save the singular status before we call the clean-up helper */
1053 	fh_singular = v4l2_fh_is_singular_file(file);
1054 
1055 	/* the release helper will cleanup any on-going streaming */
1056 	ret = _vb2_fop_release(file, NULL);
1057 
1058 	/*
1059 	 * If this was the last open file,
1060 	 * then de-initialize the hw module.
1061 	 */
1062 	if (fh_singular)
1063 		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1064 
1065 	mutex_unlock(&vpfe->lock);
1066 
1067 	return ret;
1068 }
1069 
1070 /*
1071  * vpfe_open : This function is based on the v4l2_fh_open helper function.
1072  * It has been augmented to handle module power management,
1073  * by disabling/enabling h/w module fcntl clock when necessary.
1074  */
1075 static int vpfe_open(struct file *file)
1076 {
1077 	struct vpfe_device *vpfe = video_drvdata(file);
1078 	int ret;
1079 
1080 	mutex_lock(&vpfe->lock);
1081 
1082 	ret = v4l2_fh_open(file);
1083 	if (ret) {
1084 		vpfe_err(vpfe, "v4l2_fh_open failed\n");
1085 		goto unlock;
1086 	}
1087 
1088 	if (!v4l2_fh_is_singular_file(file))
1089 		goto unlock;
1090 
1091 	if (vpfe_initialize_device(vpfe)) {
1092 		v4l2_fh_release(file);
1093 		ret = -ENODEV;
1094 	}
1095 
1096 unlock:
1097 	mutex_unlock(&vpfe->lock);
1098 	return ret;
1099 }
1100 
1101 /**
1102  * vpfe_schedule_next_buffer: set next buffer address for capture
1103  * @vpfe : ptr to vpfe device
1104  *
1105  * This function will get next buffer from the dma queue and
1106  * set the buffer address in the vpfe register for capture.
1107  * the buffer is marked active
1108  * The buffer is marked active.
1109 static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
1110 {
1111 	dma_addr_t addr;
1112 
1113 	spin_lock(&vpfe->dma_queue_lock);
1114 	if (list_empty(&vpfe->dma_queue)) {
1115 		spin_unlock(&vpfe->dma_queue_lock);
1116 		return;
1117 	}
1118 
1119 	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
1120 				    struct vpfe_cap_buffer, list);
1121 	list_del(&vpfe->next_frm->list);
1122 	spin_unlock(&vpfe->dma_queue_lock);
1123 
1124 	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
1125 	vpfe_set_sdr_addr(&vpfe->ccdc, addr);
1126 }
1127 
1128 static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
1129 {
1130 	dma_addr_t addr;
1131 
1132 	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
1133 					vpfe->field_off;
1134 
1135 	vpfe_set_sdr_addr(&vpfe->ccdc, addr);
1136 }
1137 
1138 /*
1139  * vpfe_process_buffer_complete: process a completed buffer
1140  * @vpfe : ptr to vpfe device
1141  *
1142  * This function time stamps the buffer and marks it as DONE. It also
1143  * wakes up any process waiting on the QUEUE and sets the next buffer
1144  * as current
1145  */
1146 static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
1147 {
1148 	vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
1149 	vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
1150 	vpfe->cur_frm->vb.sequence = vpfe->sequence++;
1151 	vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
1152 	vpfe->cur_frm = vpfe->next_frm;
1153 }
1154 
1155 static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
1156 				       enum v4l2_field field)
1157 {
1158 	int fid;
1159 
1160 	/* interlaced or TB capture: check which field
1161 	 * the hardware is currently in
1162 	 */
1163 	fid = vpfe_ccdc_getfid(&vpfe->ccdc);
1164 
1165 	/* switch the software maintained field id */
1166 	vpfe->field ^= 1;
1167 	if (fid == vpfe->field) {
1168 		/* we are in sync here, continue */
1169 		if (fid == 0) {
1170 			/*
1171 			 * One frame is just being captured. If the
1172 			 * next frame is available, release the
1173 			 * current frame and move on
1174 			 */
1175 			if (vpfe->cur_frm != vpfe->next_frm)
1176 				vpfe_process_buffer_complete(vpfe);
1177 
1178 			if (vpfe->stopping)
1179 				return;
1180 
1181 			/*
1182 			 * based on whether the two fields are stored
1183 			 * interleaved or separately in memory,
1184 			 * reconfigure the CCDC memory address
1185 			 */
1186 			if (field == V4L2_FIELD_SEQ_TB)
1187 				vpfe_schedule_bottom_field(vpfe);
1188 		} else {
1189 			/*
1190 			 * if one field has just been captured, configure
1191 			 * the next frame: get the next frame from the empty
1192 			 * queue; if no frame is available, hold on to the
1193 			 * current buffer
1194 			 */
1195 			if (vpfe->cur_frm == vpfe->next_frm)
1196 				vpfe_schedule_next_buffer(vpfe);
1197 		}
1198 	} else if (fid == 0) {
1199 		/*
1200 		 * out of sync. Recover from any hardware out-of-sync.
1201 		 * May lose one frame
1202 		 */
1203 		vpfe->field = fid;
1204 	}
1205 }
1206 
1207 /*
1208  * vpfe_isr : ISR handler for vpfe capture (VINT0)
1209  * @irq: irq number
1210  * @dev_id: dev_id ptr
1211  *
1212  * It changes status of the captured buffer, takes next buffer from the queue
1213  * and sets its address in VPFE registers
1214  */
1215 static irqreturn_t vpfe_isr(int irq, void *dev)
1216 {
1217 	struct vpfe_device *vpfe = (struct vpfe_device *)dev;
1218 	enum v4l2_field field = vpfe->fmt.fmt.pix.field;
1219 	int intr_status, stopping = vpfe->stopping;
1220 
1221 	intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
1222 
1223 	if (intr_status & VPFE_VDINT0) {
1224 		if (field == V4L2_FIELD_NONE) {
1225 			if (vpfe->cur_frm != vpfe->next_frm)
1226 				vpfe_process_buffer_complete(vpfe);
1227 		} else {
1228 			vpfe_handle_interlaced_irq(vpfe, field);
1229 		}
1230 		if (stopping) {
1231 			vpfe->stopping = false;
1232 			complete(&vpfe->capture_stop);
1233 		}
1234 	}
1235 
1236 	if (intr_status & VPFE_VDINT1 && !stopping) {
1237 		if (field == V4L2_FIELD_NONE &&
1238 		    vpfe->cur_frm == vpfe->next_frm)
1239 			vpfe_schedule_next_buffer(vpfe);
1240 	}
1241 
1242 	vpfe_clear_intr(&vpfe->ccdc, intr_status);
1243 
1244 	return IRQ_HANDLED;
1245 }
1246 
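/* Disable the VDINT0 (and, for progressive capture, VDINT1) interrupts */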
1247 static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
1248 {
1249 	unsigned int intr = VPFE_VDINT0;
1250 	enum ccdc_frmfmt frame_format;
1251 
1252 	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
1253 	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
1254 		intr |= VPFE_VDINT1;
1255 
1256 	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
1257 }
1258 
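/* Enable the VDINT0 (and, for progressive capture, VDINT1) interrupts */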
1259 static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
1260 {
1261 	unsigned int intr = VPFE_VDINT0;
1262 	enum ccdc_frmfmt frame_format;
1263 
1264 	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
1265 	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
1266 		intr |= VPFE_VDINT1;
1267 
1268 	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
1269 }
1270 
1271 static int vpfe_querycap(struct file *file, void  *priv,
1272 			 struct v4l2_capability *cap)
1273 {
1274 	struct vpfe_device *vpfe = video_drvdata(file);
1275 
1276 	strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
1277 	strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
1278 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1279 			"platform:%s", vpfe->v4l2_dev.name);
1280 	return 0;
1281 }
1282 
1283 /* get the format set at output pad of the adjacent subdev */
1284 static int __subdev_get_format(struct vpfe_device *vpfe,
1285 			       struct v4l2_mbus_framefmt *fmt)
1286 {
1287 	struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1288 	struct v4l2_subdev_format sd_fmt;
1289 	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
1290 	int ret;
1291 
1292 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1293 	sd_fmt.pad = 0;
1294 
1295 	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
1296 	if (ret)
1297 		return ret;
1298 
1299 	*fmt = *mbus_fmt;
1300 
1301 	vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
1302 		 fmt->width, fmt->height, fmt->code);
1303 
1304 	return 0;
1305 }
1306 
1307 /* set the format at output pad of the adjacent subdev */
1308 static int __subdev_set_format(struct vpfe_device *vpfe,
1309 			       struct v4l2_mbus_framefmt *fmt)
1310 {
1311 	struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1312 	struct v4l2_subdev_format sd_fmt;
1313 	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
1314 	int ret;
1315 
1316 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1317 	sd_fmt.pad = 0;
1318 	*mbus_fmt = *fmt;
1319 
1320 	ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
1321 	if (ret)
1322 		return ret;
1323 
1324 	vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
1325 		 fmt->width, fmt->height, fmt->code);
1326 
1327 	return 0;
1328 }
1329 
1330 static int vpfe_calc_format_size(struct vpfe_device *vpfe,
1331 				 const struct vpfe_fmt *fmt,
1332 				 struct v4l2_format *f)
1333 {
1334 	u32 bpp;
1335 
1336 	if (!fmt) {
1337 		vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
1338 		return -EINVAL;
1339 	}
1340 
1341 	bpp = __get_bytesperpixel(vpfe, fmt);
1342 
1343 	/* pitch should be 32-byte aligned */
1344 	f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
1345 	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
1346 			       f->fmt.pix.height;
1347 
1348 	vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
1349 		 __func__, print_fourcc(f->fmt.pix.pixelformat),
1350 		 f->fmt.pix.width, f->fmt.pix.height,
1351 		 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1352 
1353 	return 0;
1354 }
1355 
1356 static int vpfe_g_fmt(struct file *file, void *priv,
1357 		      struct v4l2_format *fmt)
1358 {
1359 	struct vpfe_device *vpfe = video_drvdata(file);
1360 
1361 	*fmt = vpfe->fmt;
1362 
1363 	return 0;
1364 }
1365 
1366 static int vpfe_enum_fmt(struct file *file, void  *priv,
1367 			 struct v4l2_fmtdesc *f)
1368 {
1369 	struct vpfe_device *vpfe = video_drvdata(file);
1370 	struct vpfe_subdev_info *sdinfo;
1371 	struct vpfe_fmt *fmt;
1372 
1373 	sdinfo = vpfe->current_subdev;
1374 	if (!sdinfo->sd)
1375 		return -EINVAL;
1376 
1377 	if (f->index >= vpfe->num_active_fmt)
1378 		return -EINVAL;
1379 
1380 	fmt = vpfe->active_fmt[f->index];
1381 
1382 	f->pixelformat = fmt->fourcc;
1383 
1384 	vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
1385 		 __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
1386 
1387 	return 0;
1388 }
1389 
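/*
 * Validate a requested format: fall back to the first active format for
 * an unknown fourcc and check the width/height against the sub device's
 * enumerated frame sizes, keeping the current size when nothing matches.
 */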
1390 static int vpfe_try_fmt(struct file *file, void *priv,
1391 			struct v4l2_format *f)
1392 {
1393 	struct vpfe_device *vpfe = video_drvdata(file);
1394 	struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1395 	const struct vpfe_fmt *fmt;
1396 	struct v4l2_subdev_frame_size_enum fse;
1397 	int ret, found;
1398 
1399 	fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
1400 	if (!fmt) {
1401 		/* default to first entry */
1402 		vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
1403 			 f->fmt.pix.pixelformat);
1404 		fmt = vpfe->active_fmt[0];
1405 		f->fmt.pix.pixelformat = fmt->fourcc;
1406 	}
1407 
1408 	f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
1409 
1410 	/* check for/find a valid width/height */
1411 	ret = 0;
1412 	found = false;
1413 	fse.pad = 0;
1414 	fse.code = fmt->code;
1415 	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1416 	for (fse.index = 0; ; fse.index++) {
1417 		ret = v4l2_subdev_call(sd, pad, enum_frame_size,
1418 				       NULL, &fse);
1419 		if (ret)
1420 			break;
1421 
1422 		if (f->fmt.pix.width == fse.max_width &&
1423 		    f->fmt.pix.height == fse.max_height) {
1424 			found = true;
1425 			break;
1426 		} else if (f->fmt.pix.width >= fse.min_width &&
1427 			   f->fmt.pix.width <= fse.max_width &&
1428 			   f->fmt.pix.height >= fse.min_height &&
1429 			   f->fmt.pix.height <= fse.max_height) {
1430 			found = true;
1431 			break;
1432 		}
1433 	}
1434 
1435 	if (!found) {
1436 		/* use existing values as default */
1437 		f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
1438 		f->fmt.pix.height =  vpfe->fmt.fmt.pix.height;
1439 	}
1440 
1441 	/*
1442 	 * Use current colorspace for now, it will get
1443 	 * updated properly during s_fmt
1444 	 */
1445 	f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
1446 	return vpfe_calc_format_size(vpfe, fmt, f);
1447 }
1448 
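/*
 * Set the capture format: negotiate it with the sub device, store the
 * resulting format and reprogram the CCDC.
 */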
1449 static int vpfe_s_fmt(struct file *file, void *priv,
1450 		      struct v4l2_format *fmt)
1451 {
1452 	struct vpfe_device *vpfe = video_drvdata(file);
1453 	struct vpfe_fmt *f;
1454 	struct v4l2_mbus_framefmt mbus_fmt;
1455 	int ret;
1456 
1457 	/* If streaming is started, return error */
1458 	if (vb2_is_busy(&vpfe->buffer_queue)) {
1459 		vpfe_err(vpfe, "%s device busy\n", __func__);
1460 		return -EBUSY;
1461 	}
1462 
1463 	ret = vpfe_try_fmt(file, priv, fmt);
1464 	if (ret < 0)
1465 		return ret;
1466 
1467 	f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
1468 
1469 	v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
1470 
1471 	ret = __subdev_set_format(vpfe, &mbus_fmt);
1472 	if (ret)
1473 		return ret;
1474 
1475 	/* Just double check nothing has gone wrong */
1476 	if (mbus_fmt.code != f->code) {
1477 		vpfe_dbg(3, vpfe,
1478 			 "%s subdev changed format on us, this should not happen\n",
1479 			 __func__);
1480 		return -EINVAL;
1481 	}
1482 
1483 	v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
1484 	vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1485 	vpfe->fmt.fmt.pix.pixelformat  = f->fourcc;
1486 	vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
1487 	*fmt = vpfe->fmt;
1488 	vpfe->current_vpfe_fmt = f;
1489 
1490 	/* Update the crop window based on found values */
1491 	vpfe->crop.width = fmt->fmt.pix.width;
1492 	vpfe->crop.height = fmt->fmt.pix.height;
1493 
1494 	/* set image capture parameters in the ccdc */
1495 	return vpfe_config_ccdc_image_format(vpfe);
1496 }
1497 
1498 static int vpfe_enum_size(struct file *file, void  *priv,
1499 			  struct v4l2_frmsizeenum *fsize)
1500 {
1501 	struct vpfe_device *vpfe = video_drvdata(file);
1502 	struct v4l2_subdev_frame_size_enum fse = {
1503 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1504 	};
1505 	struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1506 	struct vpfe_fmt *fmt;
1507 	int ret;
1508 
1509 	/* check for valid format */
1510 	fmt = find_format_by_pix(vpfe, fsize->pixel_format);
1511 	if (!fmt) {
1512 		vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
1513 			 fsize->pixel_format);
1514 		return -EINVAL;
1515 	}
1516 
1517 	memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
1518 
1519 	fse.index = fsize->index;
1520 	fse.pad = 0;
1521 	fse.code = fmt->code;
1522 	ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
1523 	if (ret)
1524 		return ret;
1525 
1526 	vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1527 		 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1528 		 fse.min_height, fse.max_height);
1529 
1530 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1531 	fsize->discrete.width = fse.max_width;
1532 	fsize->discrete.height = fse.max_height;
1533 
1534 	vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
1535 		 __func__, fsize->index, print_fourcc(fsize->pixel_format),
1536 		 fsize->discrete.width, fsize->discrete.height);
1537 
1538 	return 0;
1539 }
1540 
1541 /*
1542  * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
1543  * given app input index
1544  */
1545 static int
1546 vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
1547 			    int *subdev_index,
1548 			    int *subdev_input_index,
1549 			    int app_input_index)
1550 {
1551 	int i, j = 0;
1552 
1553 	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
1554 		if (app_input_index < (j + 1)) {
1555 			*subdev_index = i;
1556 			*subdev_input_index = app_input_index - j;
1557 			return 0;
1558 		}
1559 		j++;
1560 	}
1561 	return -EINVAL;
1562 }
1563 
1564 /*
1565  * vpfe_get_app_input_index - Get app input index for a given subdev input index
1566  * The driver stores the input index of the current sub device and translates it
1567  * when the application requests the current input
1568  */
1569 static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
1570 				    int *app_input_index)
1571 {
1572 	struct vpfe_config *cfg = vpfe->cfg;
1573 	struct vpfe_subdev_info *sdinfo;
1574 	struct i2c_client *client;
1575 	struct i2c_client *curr_client;
1576 	int i, j = 0;
1577 
1578 	curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
1579 	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
1580 		sdinfo = &cfg->sub_devs[i];
1581 		client = v4l2_get_subdevdata(sdinfo->sd);
1582 		if (client->addr == curr_client->addr &&
1583 		    client->adapter->nr == curr_client->adapter->nr) {
1584 			if (vpfe->current_input >= 1)
1585 				return -1;
1586 			*app_input_index = j + vpfe->current_input;
1587 			return 0;
1588 		}
1589 		j++;
1590 	}
1591 	return -EINVAL;
1592 }
1593 
1594 static int vpfe_enum_input(struct file *file, void *priv,
1595 			   struct v4l2_input *inp)
1596 {
1597 	struct vpfe_device *vpfe = video_drvdata(file);
1598 	struct vpfe_subdev_info *sdinfo;
1599 	int subdev, index;
1600 
1601 	if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
1602 					inp->index) < 0) {
1603 		vpfe_dbg(1, vpfe,
1604 			"input information not found for the subdev\n");
1605 		return -EINVAL;
1606 	}
1607 	sdinfo = &vpfe->cfg->sub_devs[subdev];
1608 	*inp = sdinfo->inputs[index];
1609 
1610 	return 0;
1611 }
1612 
1613 static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
1614 {
1615 	struct vpfe_device *vpfe = video_drvdata(file);
1616 
1617 	return vpfe_get_app_input_index(vpfe, index);
1618 }
1619 
1620 /* Assumes caller is holding vpfe_dev->lock */
1621 static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
1622 {
1623 	int subdev_index = 0, inp_index = 0;
1624 	struct vpfe_subdev_info *sdinfo;
1625 	struct vpfe_route *route;
1626 	u32 input, output;
1627 	int ret;
1628 
1629 	/* If streaming is started, return error */
1630 	if (vb2_is_busy(&vpfe->buffer_queue)) {
1631 		vpfe_err(vpfe, "%s device busy\n", __func__);
1632 		return -EBUSY;
1633 	}
1634 	ret = vpfe_get_subdev_input_index(vpfe,
1635 					  &subdev_index,
1636 					  &inp_index,
1637 					  index);
1638 	if (ret < 0) {
1639 		vpfe_err(vpfe, "invalid input index: %d\n", index);
1640 		goto get_out;
1641 	}
1642 
1643 	sdinfo = &vpfe->cfg->sub_devs[subdev_index];
1644 	sdinfo->sd = vpfe->sd[subdev_index];
1645 	route = &sdinfo->routes[inp_index];
1646 	if (route && sdinfo->can_route) {
1647 		input = route->input;
1648 		output = route->output;
1649 		if (sdinfo->sd) {
1650 			ret = v4l2_subdev_call(sdinfo->sd, video,
1651 					s_routing, input, output, 0);
1652 			if (ret) {
1653 				vpfe_err(vpfe, "s_routing failed\n");
1654 				ret = -EINVAL;
1655 				goto get_out;
1656 			}
1657 		}
1658 
1659 	}
1660 
1661 	vpfe->current_subdev = sdinfo;
1662 	if (sdinfo->sd)
1663 		vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
1664 	vpfe->current_input = index;
1665 	vpfe->std_index = 0;
1666 
1667 	/* set the bus/interface parameter for the sub device in ccdc */
1668 	ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
1669 	if (ret)
1670 		return ret;
1671 
1672 	/* set the default image parameters in the device */
1673 	return vpfe_config_image_format(vpfe,
1674 					vpfe_standards[vpfe->std_index].std_id);
1675 
1676 get_out:
1677 	return ret;
1678 }
1679 
1680 static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
1681 {
1682 	struct vpfe_device *vpfe = video_drvdata(file);
1683 
1684 	return vpfe_set_input(vpfe, index);
1685 }
1686 
1687 static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
1688 {
1689 	struct vpfe_device *vpfe = video_drvdata(file);
1690 	struct vpfe_subdev_info *sdinfo;
1691 
1692 	sdinfo = vpfe->current_subdev;
1693 	if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
1694 		return -ENODATA;
1695 
1696 	/* Call querystd function of decoder device */
1697 	return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
1698 					 video, querystd, std_id);
1699 }
1700 
1701 static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
1702 {
1703 	struct vpfe_device *vpfe = video_drvdata(file);
1704 	struct vpfe_subdev_info *sdinfo;
1705 	int ret;
1706 
1707 	sdinfo = vpfe->current_subdev;
1708 	if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
1709 		return -ENODATA;
1710 
1711 	/* if trying to set the same std then nothing to do */
1712 	if (vpfe_standards[vpfe->std_index].std_id == std_id)
1713 		return 0;
1714 
1715 	/* If streaming is started, return error */
1716 	if (vb2_is_busy(&vpfe->buffer_queue)) {
1717 		vpfe_err(vpfe, "%s device busy\n", __func__);
1718 		ret = -EBUSY;
1719 		return ret;
1720 	}
1721 
1722 	ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
1723 					 video, s_std, std_id);
1724 	if (ret < 0) {
1725 		vpfe_err(vpfe, "Failed to set standard\n");
1726 		return ret;
1727 	}
1728 	ret = vpfe_config_image_format(vpfe, std_id);
1729 
1730 	return ret;
1731 }
1732 
1733 static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
1734 {
1735 	struct vpfe_device *vpfe = video_drvdata(file);
1736 	struct vpfe_subdev_info *sdinfo;
1737 
1738 	sdinfo = vpfe->current_subdev;
1739 	if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
1740 		return -ENODATA;
1741 
1742 	*std_id = vpfe_standards[vpfe->std_index].std_id;
1743 
1744 	return 0;
1745 }
1746 
1747 /*
1748  * vpfe_calculate_offsets : This function calculates the buffer offset
1749  * between the top and bottom fields
1750  */
1751 static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
1752 {
1753 	struct v4l2_rect image_win;
1754 
1755 	vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
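	/* Offset between the top- and bottom-field data within one capture buffer. */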
1756 	vpfe->field_off = image_win.height * image_win.width;
1757 }
1758 
1759 /*
1760  * vpfe_queue_setup - Callback function for buffer setup.
1761  * @vq: vb2_queue ptr
1762  * @nbuffers: ptr to number of buffers requested by application
1763  * @nplanes: contains the number of distinct video planes needed to hold a frame
1764  * @sizes[]: contains the size (in bytes) of each plane.
1765  * @alloc_devs: ptr to allocation context
1766  *
1767  * This callback function is called when reqbuf() is called to adjust
1768  * the buffer count and buffer size
1769  */
1770 static int vpfe_queue_setup(struct vb2_queue *vq,
1771 			    unsigned int *nbuffers, unsigned int *nplanes,
1772 			    unsigned int sizes[], struct device *alloc_devs[])
1773 {
1774 	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1775 	unsigned size = vpfe->fmt.fmt.pix.sizeimage;
1776 
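	/* Make sure the queue ends up with at least three buffers in total. */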
1777 	if (vq->num_buffers + *nbuffers < 3)
1778 		*nbuffers = 3 - vq->num_buffers;
1779 
1780 	if (*nplanes) {
1781 		if (sizes[0] < size)
1782 			return -EINVAL;
1783 		size = sizes[0];
1784 	}
1785 
1786 	*nplanes = 1;
1787 	sizes[0] = size;
1788 
1789 	vpfe_dbg(1, vpfe,
1790 		"nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
1791 
1792 	/* Calculate field offset */
1793 	vpfe_calculate_offsets(vpfe);
1794 
1795 	return 0;
1796 }
1797 
1798 /*
1799  * vpfe_buffer_prepare : callback function for buffer prepare
1800  * @vb: ptr to vb2_buffer
1801  *
1802  * This is the callback function for buffer prepare when vb2_qbuf()
1803  * is called. It sets the plane payload and the buffer field, and
1804  * verifies that the buffer is large enough for the negotiated format.
1805  */
1806 static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1807 {
1808 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1809 	struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1810 
1811 	vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
1812 
1813 	if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
1814 		return -EINVAL;
1815 
1816 	vbuf->field = vpfe->fmt.fmt.pix.field;
1817 
1818 	return 0;
1819 }
1820 
1821 /*
1822  * vpfe_buffer_queue : Callback function to add buffer to DMA queue
1823  * @vb: ptr to vb2_buffer
1824  */
1825 static void vpfe_buffer_queue(struct vb2_buffer *vb)
1826 {
1827 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1828 	struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1829 	struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
1830 	unsigned long flags = 0;
1831 
1832 	/* add the buffer to the DMA queue */
1833 	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
1834 	list_add_tail(&buf->list, &vpfe->dma_queue);
1835 	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1836 }
1837 
1838 static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
1839 				    enum vb2_buffer_state state)
1840 {
1841 	struct vpfe_cap_buffer *buf, *node;
1842 	unsigned long flags;
1843 
1844 	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
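	/* Give every buffer still on the DMA queue back to vb2 in the requested state. */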
1845 	list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
1846 		vb2_buffer_done(&buf->vb.vb2_buf, state);
1847 		list_del(&buf->list);
1848 	}
1849 
1850 	if (vpfe->cur_frm)
1851 		vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
1852 
1853 	if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
1854 		vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
1855 
1856 	vpfe->cur_frm = NULL;
1857 	vpfe->next_frm = NULL;
1858 	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1859 }
1860 
1861 /*
1862  * vpfe_start_streaming : Starts the DMA engine for streaming
1863  * @vq: ptr to vb2_queue
1864  * @count: number of buffers
1865  */
1866 static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1867 {
1868 	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1869 	struct vpfe_subdev_info *sdinfo;
1870 	unsigned long flags;
1871 	unsigned long addr;
1872 	int ret;
1873 
1874 	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
1875 
1876 	vpfe->field = 0;
1877 	vpfe->sequence = 0;
1878 
1879 	sdinfo = vpfe->current_subdev;
1880 
1881 	vpfe_attach_irq(vpfe);
1882 
1883 	vpfe->stopping = false;
1884 	init_completion(&vpfe->capture_stop);
1885 
1886 	if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
1887 		vpfe_ccdc_config_raw(&vpfe->ccdc);
1888 	else
1889 		vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
1890 
1891 	/* Get the next frame from the buffer queue */
1892 	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
1893 				    struct vpfe_cap_buffer, list);
1894 	vpfe->cur_frm = vpfe->next_frm;
1895 	/* Remove buffer from the buffer queue */
1896 	list_del(&vpfe->cur_frm->list);
1897 	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1898 
1899 	addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
1900 
1901 	vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
1902 
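	/* Enable the CCDC so capture into the programmed buffer can begin. */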
1903 	vpfe_pcr_enable(&vpfe->ccdc, 1);
1904 
1905 	ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
1906 	if (ret < 0) {
1907 		vpfe_err(vpfe, "Error starting stream in subdev\n");
1908 		goto err;
1909 	}
1910 
1911 	return 0;
1912 
1913 err:
1914 	vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
1915 	vpfe_pcr_enable(&vpfe->ccdc, 0);
1916 	return ret;
1917 }
1918 
1919 /*
1920  * vpfe_stop_streaming : Stop the DMA engine
1921  * @vq: ptr to vb2_queue
1922  *
1923  * This callback stops the DMA engine and releases any buffers
1924  * remaining in the DMA queue.
1925  */
1926 static void vpfe_stop_streaming(struct vb2_queue *vq)
1927 {
1928 	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1929 	struct vpfe_subdev_info *sdinfo;
1930 	int ret;
1931 
1932 	vpfe_pcr_enable(&vpfe->ccdc, 0);
1933 
1934 	/* Wait for the last frame to be captured */
1935 	vpfe->stopping = true;
1936 	wait_for_completion_timeout(&vpfe->capture_stop,
1937 				    msecs_to_jiffies(250));
1938 
1939 	vpfe_detach_irq(vpfe);
1940 
1941 	sdinfo = vpfe->current_subdev;
1942 	ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
1943 	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
1944 		vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
1945 
1946 	/* release all active buffers */
1947 	vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
1948 }
1949 
1950 static int vpfe_g_pixelaspect(struct file *file, void *priv,
1951 			      int type, struct v4l2_fract *f)
1952 {
1953 	struct vpfe_device *vpfe = video_drvdata(file);
1954 
1955 	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1956 	    vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
1957 		return -EINVAL;
1958 
1959 	*f = vpfe_standards[vpfe->std_index].pixelaspect;
1960 
1961 	return 0;
1962 }
1963 
1964 static int
1965 vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
1966 {
1967 	struct vpfe_device *vpfe = video_drvdata(file);
1968 
1969 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1970 	    vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
1971 		return -EINVAL;
1972 
1973 	switch (s->target) {
1974 	case V4L2_SEL_TGT_CROP_BOUNDS:
1975 	case V4L2_SEL_TGT_CROP_DEFAULT:
1976 		s->r.left = 0;
1977 		s->r.top = 0;
1978 		s->r.width = vpfe_standards[vpfe->std_index].width;
1979 		s->r.height = vpfe_standards[vpfe->std_index].height;
1980 		break;
1981 
1982 	case V4L2_SEL_TGT_CROP:
1983 		s->r = vpfe->crop;
1984 		break;
1985 
1986 	default:
1987 		return -EINVAL;
1988 	}
1989 
1990 	return 0;
1991 }
1992 
1993 static int
1994 vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
1995 {
1996 	struct vpfe_device *vpfe = video_drvdata(file);
1997 	struct v4l2_rect cr = vpfe->crop;
1998 	struct v4l2_rect r = s->r;
1999 	u32 bpp;
2000 
2001 	/* If streaming is started, return error */
2002 	if (vb2_is_busy(&vpfe->buffer_queue)) {
2003 		vpfe_err(vpfe, "%s device busy\n", __func__);
2004 		return -EBUSY;
2005 	}
2006 
2007 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
2008 			s->target != V4L2_SEL_TGT_CROP)
2009 		return -EINVAL;
2010 
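	/* Align the requested rectangle and keep it within the current crop bounds. */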
2011 	v4l_bound_align_image(&r.width, 0, cr.width, 0,
2012 			      &r.height, 0, cr.height, 0, 0);
2013 
2014 	r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
2015 	r.top  = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
2016 
2017 	if (s->flags & V4L2_SEL_FLAG_LE && !v4l2_rect_enclosed(&r, &s->r))
2018 		return -ERANGE;
2019 
2020 	if (s->flags & V4L2_SEL_FLAG_GE && !v4l2_rect_enclosed(&s->r, &r))
2021 		return -ERANGE;
2022 
2023 	s->r = vpfe->crop = r;
2024 
2025 	bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
2026 	vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
2027 	vpfe->fmt.fmt.pix.width = r.width;
2028 	vpfe->fmt.fmt.pix.height = r.height;
2029 	vpfe->fmt.fmt.pix.bytesperline =
2030 		vpfe_ccdc_get_line_length(&vpfe->ccdc);
2031 	vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
2032 						vpfe->fmt.fmt.pix.height;
2033 
2034 	vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
2035 		 r.left, r.top, r.width, r.height, cr.width, cr.height);
2036 
2037 	return 0;
2038 }
2039 
2040 static long vpfe_ioctl_default(struct file *file, void *priv,
2041 			       bool valid_prio, unsigned int cmd, void *param)
2042 {
2043 	struct vpfe_device *vpfe = video_drvdata(file);
2044 	int ret;
2045 
2046 	if (!valid_prio) {
2047 		vpfe_err(vpfe, "%s device busy\n", __func__);
2048 		return -EBUSY;
2049 	}
2050 
2051 	/* If streaming is started, return error */
2052 	if (vb2_is_busy(&vpfe->buffer_queue)) {
2053 		vpfe_err(vpfe, "%s device busy\n", __func__);
2054 		return -EBUSY;
2055 	}
2056 
2057 	switch (cmd) {
2058 	case VIDIOC_AM437X_CCDC_CFG:
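		/* Load user-supplied CCDC parameters, then refresh the cached image format. */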
2059 		ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
2060 		if (ret) {
2061 			vpfe_dbg(2, vpfe,
2062 				"Error setting parameters in CCDC\n");
2063 			return ret;
2064 		}
2065 		ret = vpfe_get_ccdc_image_format(vpfe,
2066 						 &vpfe->fmt);
2067 		if (ret < 0) {
2068 			vpfe_dbg(2, vpfe,
2069 				"Invalid image format at CCDC\n");
2070 			return ret;
2071 		}
2072 		break;
2073 
2074 	default:
2075 		ret = -ENOTTY;
2076 		break;
2077 	}
2078 
2079 	return ret;
2080 }
2081 
2082 static const struct vb2_ops vpfe_video_qops = {
2083 	.wait_prepare		= vb2_ops_wait_prepare,
2084 	.wait_finish		= vb2_ops_wait_finish,
2085 	.queue_setup		= vpfe_queue_setup,
2086 	.buf_prepare		= vpfe_buffer_prepare,
2087 	.buf_queue		= vpfe_buffer_queue,
2088 	.start_streaming	= vpfe_start_streaming,
2089 	.stop_streaming		= vpfe_stop_streaming,
2090 };
2091 
2092 /* vpfe capture driver file operations */
2093 static const struct v4l2_file_operations vpfe_fops = {
2094 	.owner		= THIS_MODULE,
2095 	.open		= vpfe_open,
2096 	.release	= vpfe_release,
2097 	.read		= vb2_fop_read,
2098 	.poll		= vb2_fop_poll,
2099 	.unlocked_ioctl	= video_ioctl2,
2100 	.mmap		= vb2_fop_mmap,
2101 };
2102 
2103 /* vpfe capture ioctl operations */
2104 static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
2105 	.vidioc_querycap		= vpfe_querycap,
2106 	.vidioc_enum_fmt_vid_cap	= vpfe_enum_fmt,
2107 	.vidioc_g_fmt_vid_cap		= vpfe_g_fmt,
2108 	.vidioc_s_fmt_vid_cap		= vpfe_s_fmt,
2109 	.vidioc_try_fmt_vid_cap		= vpfe_try_fmt,
2110 
2111 	.vidioc_enum_framesizes		= vpfe_enum_size,
2112 
2113 	.vidioc_enum_input		= vpfe_enum_input,
2114 	.vidioc_g_input			= vpfe_g_input,
2115 	.vidioc_s_input			= vpfe_s_input,
2116 
2117 	.vidioc_querystd		= vpfe_querystd,
2118 	.vidioc_s_std			= vpfe_s_std,
2119 	.vidioc_g_std			= vpfe_g_std,
2120 
2121 	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
2122 	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
2123 	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
2124 	.vidioc_querybuf		= vb2_ioctl_querybuf,
2125 	.vidioc_qbuf			= vb2_ioctl_qbuf,
2126 	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
2127 	.vidioc_expbuf			= vb2_ioctl_expbuf,
2128 	.vidioc_streamon		= vb2_ioctl_streamon,
2129 	.vidioc_streamoff		= vb2_ioctl_streamoff,
2130 
2131 	.vidioc_log_status		= v4l2_ctrl_log_status,
2132 	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
2133 	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
2134 
2135 	.vidioc_g_pixelaspect		= vpfe_g_pixelaspect,
2136 	.vidioc_g_selection		= vpfe_g_selection,
2137 	.vidioc_s_selection		= vpfe_s_selection,
2138 
2139 	.vidioc_default			= vpfe_ioctl_default,
2140 };
2141 
2142 static int
2143 vpfe_async_bound(struct v4l2_async_notifier *notifier,
2144 		 struct v4l2_subdev *subdev,
2145 		 struct v4l2_async_subdev *asd)
2146 {
2147 	struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
2148 					       struct vpfe_device, v4l2_dev);
2149 	struct vpfe_subdev_info *sdinfo;
2150 	struct vpfe_fmt *fmt;
2151 	int ret = 0;
2152 	bool found = false;
2153 	int i, j, k;
2154 
2155 	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
2156 		if (vpfe->cfg->asd[i]->match.fwnode ==
2157 		    asd[i].match.fwnode) {
2158 			sdinfo = &vpfe->cfg->sub_devs[i];
2159 			vpfe->sd[i] = subdev;
2160 			vpfe->sd[i]->grp_id = sdinfo->grp_id;
2161 			found = true;
2162 			break;
2163 		}
2164 	}
2165 
2166 	if (!found) {
2167 		vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
2168 		return -EINVAL;
2169 	}
2170 
2171 	vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
2172 
2173 	vpfe->num_active_fmt = 0;
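	/* Enumerate the subdev's media bus codes and keep those the VPFE also supports. */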
2174 	for (j = 0, i = 0; (ret != -EINVAL); ++j) {
2175 		struct v4l2_subdev_mbus_code_enum mbus_code = {
2176 			.index = j,
2177 			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
2178 		};
2179 
2180 		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
2181 				       NULL, &mbus_code);
2182 		if (ret)
2183 			continue;
2184 
2185 		vpfe_dbg(3, vpfe,
2186 			 "subdev %s: code: %04x idx: %d\n",
2187 			 subdev->name, mbus_code.code, j);
2188 
2189 		for (k = 0; k < ARRAY_SIZE(formats); k++) {
2190 			fmt = &formats[k];
2191 			if (mbus_code.code != fmt->code)
2192 				continue;
2193 			vpfe->active_fmt[i] = fmt;
2194 			vpfe_dbg(3, vpfe,
2195 				 "matched fourcc: %s code: %04x idx: %d\n",
2196 				 print_fourcc(fmt->fourcc), mbus_code.code, i);
2197 			vpfe->num_active_fmt = ++i;
2198 		}
2199 	}
2200 
2201 	if (!i) {
2202 		vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
2203 			 subdev->name);
2204 		return -EINVAL;
2205 	}
2206 	return 0;
2207 }
2208 
2209 static int vpfe_probe_complete(struct vpfe_device *vpfe)
2210 {
2211 	struct video_device *vdev;
2212 	struct vb2_queue *q;
2213 	int err;
2214 
2215 	spin_lock_init(&vpfe->dma_queue_lock);
2216 	mutex_init(&vpfe->lock);
2217 
2218 	vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2219 
2220 	/* set first sub device as current one */
2221 	vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
2222 	vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
2223 
2224 	err = vpfe_set_input(vpfe, 0);
2225 	if (err)
2226 		goto probe_out;
2227 
2228 	/* Initialize videobuf2 queue as per the buffer type */
2229 	q = &vpfe->buffer_queue;
2230 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2231 	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
2232 	q->drv_priv = vpfe;
2233 	q->ops = &vpfe_video_qops;
2234 	q->mem_ops = &vb2_dma_contig_memops;
2235 	q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
2236 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
2237 	q->lock = &vpfe->lock;
2238 	q->min_buffers_needed = 1;
2239 	q->dev = vpfe->pdev;
2240 
2241 	err = vb2_queue_init(q);
2242 	if (err) {
2243 		vpfe_err(vpfe, "vb2_queue_init() failed\n");
2244 		goto probe_out;
2245 	}
2246 
2247 	INIT_LIST_HEAD(&vpfe->dma_queue);
2248 
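	/* Set up and register the V4L2 video device node backed by this queue. */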
2249 	vdev = &vpfe->video_dev;
2250 	strscpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
2251 	vdev->release = video_device_release_empty;
2252 	vdev->fops = &vpfe_fops;
2253 	vdev->ioctl_ops = &vpfe_ioctl_ops;
2254 	vdev->v4l2_dev = &vpfe->v4l2_dev;
2255 	vdev->vfl_dir = VFL_DIR_RX;
2256 	vdev->queue = q;
2257 	vdev->lock = &vpfe->lock;
2258 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
2259 			    V4L2_CAP_READWRITE;
2260 	video_set_drvdata(vdev, vpfe);
2261 	err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1);
2262 	if (err) {
2263 		vpfe_err(vpfe,
2264 			"Unable to register video device.\n");
2265 		goto probe_out;
2266 	}
2267 
2268 	return 0;
2269 
2270 probe_out:
2271 	v4l2_device_unregister(&vpfe->v4l2_dev);
2272 	return err;
2273 }
2274 
2275 static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
2276 {
2277 	struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
2278 					struct vpfe_device, v4l2_dev);
2279 
2280 	return vpfe_probe_complete(vpfe);
2281 }
2282 
2283 static const struct v4l2_async_notifier_operations vpfe_async_ops = {
2284 	.bound = vpfe_async_bound,
2285 	.complete = vpfe_async_complete,
2286 };
2287 
2288 static struct vpfe_config *
2289 vpfe_get_pdata(struct vpfe_device *vpfe)
2290 {
2291 	struct device_node *endpoint = NULL;
2292 	struct device *dev = vpfe->pdev;
2293 	struct vpfe_subdev_info *sdinfo;
2294 	struct vpfe_config *pdata;
2295 	unsigned int flags;
2296 	unsigned int i;
2297 	int err;
2298 
2299 	dev_dbg(dev, "vpfe_get_pdata\n");
2300 
2301 	v4l2_async_notifier_init(&vpfe->notifier);
2302 
2303 	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
2304 		return dev->platform_data;
2305 
2306 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2307 	if (!pdata)
2308 		return NULL;
2309 
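	/* Walk every endpoint in the DT graph and build a sub-device entry for each. */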
2310 	for (i = 0; ; i++) {
2311 		struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
2312 		struct device_node *rem;
2313 
2314 		endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint);
2315 		if (!endpoint)
2316 			break;
2317 
2318 		sdinfo = &pdata->sub_devs[i];
2319 		sdinfo->grp_id = 0;
2320 
2321 		/* we only support camera */
2322 		sdinfo->inputs[0].index = i;
2323 		strscpy(sdinfo->inputs[0].name, "Camera",
2324 			sizeof(sdinfo->inputs[0].name));
2325 		sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
2326 		sdinfo->inputs[0].std = V4L2_STD_ALL;
2327 		sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
2328 
2329 		sdinfo->can_route = 0;
2330 		sdinfo->routes = NULL;
2331 
2332 		of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
2333 				     &sdinfo->vpfe_param.if_type);
2334 		if (sdinfo->vpfe_param.if_type < 0 ||
2335 			sdinfo->vpfe_param.if_type > 4) {
2336 			sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
2337 		}
2338 
2339 		err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
2340 						 &bus_cfg);
2341 		if (err) {
2342 			dev_err(dev, "Could not parse the endpoint\n");
2343 			goto cleanup;
2344 		}
2345 
2346 		sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
2347 
2348 		if (sdinfo->vpfe_param.bus_width < 8 ||
2349 			sdinfo->vpfe_param.bus_width > 16) {
2350 			dev_err(dev, "Invalid bus width.\n");
2351 			goto cleanup;
2352 		}
2353 
2354 		flags = bus_cfg.bus.parallel.flags;
2355 
2356 		if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
2357 			sdinfo->vpfe_param.hdpol = 1;
2358 
2359 		if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
2360 			sdinfo->vpfe_param.vdpol = 1;
2361 
2362 		rem = of_graph_get_remote_port_parent(endpoint);
2363 		if (!rem) {
2364 			dev_err(dev, "Remote device at %pOF not found\n",
2365 				endpoint);
2366 			goto cleanup;
2367 		}
2368 
2369 		pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
2370 			&vpfe->notifier, of_fwnode_handle(rem),
2371 			struct v4l2_async_subdev);
2372 		of_node_put(rem);
2373 		if (IS_ERR(pdata->asd[i]))
2374 			goto cleanup;
2375 	}
2376 
2377 	of_node_put(endpoint);
2378 	return pdata;
2379 
2380 cleanup:
2381 	v4l2_async_notifier_cleanup(&vpfe->notifier);
2382 	of_node_put(endpoint);
2383 	return NULL;
2384 }
2385 
2386 /*
2387  * vpfe_probe : This function creates device entries by registering
2388  * itself with the V4L2 framework and initializes the fields of
2389  * each device object
2390  */
2391 static int vpfe_probe(struct platform_device *pdev)
2392 {
2393 	struct vpfe_config *vpfe_cfg;
2394 	struct vpfe_device *vpfe;
2395 	struct vpfe_ccdc *ccdc;
2396 	struct resource	*res;
2397 	int ret;
2398 
2399 	vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
2400 	if (!vpfe)
2401 		return -ENOMEM;
2402 
2403 	vpfe->pdev = &pdev->dev;
2404 
2405 	vpfe_cfg = vpfe_get_pdata(vpfe);
2406 	if (!vpfe_cfg) {
2407 		dev_err(&pdev->dev, "No platform data\n");
2408 		return -EINVAL;
2409 	}
2410 
2411 	vpfe->cfg = vpfe_cfg;
2412 	ccdc = &vpfe->ccdc;
2413 
2414 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2415 	ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
2416 	if (IS_ERR(ccdc->ccdc_cfg.base_addr)) {
2417 		ret = PTR_ERR(ccdc->ccdc_cfg.base_addr);
2418 		goto probe_out_cleanup;
2419 	}
2420 
2421 	ret = platform_get_irq(pdev, 0);
2422 	if (ret <= 0) {
2423 		ret = -ENODEV;
2424 		goto probe_out_cleanup;
2425 	}
2426 	vpfe->irq = ret;
2427 
2428 	ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
2429 			       "vpfe_capture0", vpfe);
2430 	if (ret) {
2431 		dev_err(&pdev->dev, "Unable to request interrupt\n");
2432 		ret = -EINVAL;
2433 		goto probe_out_cleanup;
2434 	}
2435 
2436 	ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
2437 	if (ret) {
2438 		vpfe_err(vpfe,
2439 			"Unable to register v4l2 device.\n");
2440 		goto probe_out_cleanup;
2441 	}
2442 
2443 	/* set the driver data in platform device */
2444 	platform_set_drvdata(pdev, vpfe);
2445 	/* Enabling module functional clock */
2446 	pm_runtime_enable(&pdev->dev);
2447 
2448 	/* for now just enable it here instead of waiting for the open */
2449 	ret = pm_runtime_resume_and_get(&pdev->dev);
2450 	if (ret < 0) {
2451 		vpfe_err(vpfe, "Unable to resume device.\n");
2452 		goto probe_out_v4l2_unregister;
2453 	}
2454 
2455 	vpfe_ccdc_config_defaults(ccdc);
2456 
2457 	pm_runtime_put_sync(&pdev->dev);
2458 
2459 	vpfe->sd = devm_kcalloc(&pdev->dev,
2460 				ARRAY_SIZE(vpfe->cfg->asd),
2461 				sizeof(struct v4l2_subdev *),
2462 				GFP_KERNEL);
2463 	if (!vpfe->sd) {
2464 		ret = -ENOMEM;
2465 		goto probe_out_v4l2_unregister;
2466 	}
2467 
2468 	vpfe->notifier.ops = &vpfe_async_ops;
2469 	ret = v4l2_async_notifier_register(&vpfe->v4l2_dev, &vpfe->notifier);
2470 	if (ret) {
2471 		vpfe_err(vpfe, "Error registering async notifier\n");
2472 		ret = -EINVAL;
2473 		goto probe_out_v4l2_unregister;
2474 	}
2475 
2476 	return 0;
2477 
2478 probe_out_v4l2_unregister:
2479 	v4l2_device_unregister(&vpfe->v4l2_dev);
2480 probe_out_cleanup:
2481 	v4l2_async_notifier_cleanup(&vpfe->notifier);
2482 	return ret;
2483 }
2484 
2485 /*
2486  * vpfe_remove : Unregisters the device from the V4L2 framework
2487  */
2488 static int vpfe_remove(struct platform_device *pdev)
2489 {
2490 	struct vpfe_device *vpfe = platform_get_drvdata(pdev);
2491 
2492 	pm_runtime_disable(&pdev->dev);
2493 
2494 	v4l2_async_notifier_unregister(&vpfe->notifier);
2495 	v4l2_async_notifier_cleanup(&vpfe->notifier);
2496 	v4l2_device_unregister(&vpfe->v4l2_dev);
2497 	video_unregister_device(&vpfe->video_dev);
2498 
2499 	return 0;
2500 }
2501 
2502 #ifdef CONFIG_PM_SLEEP
2503 
2504 static void vpfe_save_context(struct vpfe_ccdc *ccdc)
2505 {
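	/* Cache each CCDC register in ccdc_ctx[], indexed by its register offset / 4. */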
2506 	ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
2507 	ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
2508 	ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
2509 	ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
2510 	ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
2511 	ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
2512 	ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
2513 	ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
2514 	ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
2515 	ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
2516 	ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
2517 	ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
2518 	ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
2519 	ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
2520 							    VPFE_HD_VD_WID);
2521 	ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
2522 							    VPFE_PIX_LINES);
2523 	ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
2524 							    VPFE_HORZ_INFO);
2525 	ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
2526 							     VPFE_VERT_START);
2527 	ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
2528 							     VPFE_VERT_LINES);
2529 	ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
2530 							    VPFE_HSIZE_OFF);
2531 }
2532 
2533 static int vpfe_suspend(struct device *dev)
2534 {
2535 	struct vpfe_device *vpfe = dev_get_drvdata(dev);
2536 	struct vpfe_ccdc *ccdc = &vpfe->ccdc;
2537 
2538 	/* only do full suspend if streaming has started */
2539 	if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
2540 		/*
2541 		 * ignore RPM resume errors here, as it is already too late.
2542 		 * A check like that should happen earlier, either at
2543 		 * open() or just before start streaming.
2544 		 */
2545 		pm_runtime_get_sync(dev);
2546 		vpfe_config_enable(ccdc, 1);
2547 
2548 		/* Save VPFE context */
2549 		vpfe_save_context(ccdc);
2550 
2551 		/* Disable CCDC */
2552 		vpfe_pcr_enable(ccdc, 0);
2553 		vpfe_config_enable(ccdc, 0);
2554 
2555 		/* Disable both master and slave clock */
2556 		pm_runtime_put_sync(dev);
2557 	}
2558 
2559 	/* Select sleep pin state */
2560 	pinctrl_pm_select_sleep_state(dev);
2561 
2562 	return 0;
2563 }
2564 
2565 static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
2566 {
2567 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
2568 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
2569 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
2570 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
2571 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
2572 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
2573 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
2574 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
2575 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
2576 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
2577 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
2578 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
2579 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
2580 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
2581 						VPFE_HD_VD_WID);
2582 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
2583 						VPFE_PIX_LINES);
2584 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
2585 						VPFE_HORZ_INFO);
2586 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
2587 						VPFE_VERT_START);
2588 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
2589 						VPFE_VERT_LINES);
2590 	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
2591 						VPFE_HSIZE_OFF);
2592 }
2593 
2594 static int vpfe_resume(struct device *dev)
2595 {
2596 	struct vpfe_device *vpfe = dev_get_drvdata(dev);
2597 	struct vpfe_ccdc *ccdc = &vpfe->ccdc;
2598 
2599 	/* only do full resume if streaming has started */
2600 	if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
2601 		/* Enable both master and slave clock */
2602 		pm_runtime_get_sync(dev);
2603 		vpfe_config_enable(ccdc, 1);
2604 
2605 		/* Restore VPFE context */
2606 		vpfe_restore_context(ccdc);
2607 
2608 		vpfe_config_enable(ccdc, 0);
2609 		pm_runtime_put_sync(dev);
2610 	}
2611 
2612 	/* Select default pin state */
2613 	pinctrl_pm_select_default_state(dev);
2614 
2615 	return 0;
2616 }
2617 
2618 #endif
2619 
2620 static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
2621 
2622 static const struct of_device_id vpfe_of_match[] = {
2623 	{ .compatible = "ti,am437x-vpfe", },
2624 	{ /* sentinel */ },
2625 };
2626 MODULE_DEVICE_TABLE(of, vpfe_of_match);
2627 
2628 static struct platform_driver vpfe_driver = {
2629 	.probe		= vpfe_probe,
2630 	.remove		= vpfe_remove,
2631 	.driver = {
2632 		.name	= VPFE_MODULE_NAME,
2633 		.pm	= &vpfe_pm_ops,
2634 		.of_match_table = of_match_ptr(vpfe_of_match),
2635 	},
2636 };
2637 
2638 module_platform_driver(vpfe_driver);
2639 
2640 MODULE_AUTHOR("Texas Instruments");
2641 MODULE_DESCRIPTION("TI AM437x VPFE driver");
2642 MODULE_LICENSE("GPL");
2643 MODULE_VERSION(VPFE_VERSION);
2644