1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
4  *
5  * Copyright (c) 2013 Texas Instruments Inc.
6  * David Griego, <dagriego@biglakesoftware.com>
7  * Dale Farnsworth, <dale@farnsworth.org>
8  * Archit Taneja, <archit@ti.com>
9  *
10  * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
11  * Pawel Osciak, <pawel@osciak.com>
12  * Marek Szyprowski, <m.szyprowski@samsung.com>
13  *
14  * Based on the virtual v4l2-mem2mem example device
15  */
16 
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/ioctl.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/videodev2.h>
31 #include <linux/log2.h>
32 #include <linux/sizes.h>
33 
34 #include <media/v4l2-common.h>
35 #include <media/v4l2-ctrls.h>
36 #include <media/v4l2-device.h>
37 #include <media/v4l2-event.h>
38 #include <media/v4l2-ioctl.h>
39 #include <media/v4l2-mem2mem.h>
40 #include <media/videobuf2-v4l2.h>
41 #include <media/videobuf2-dma-contig.h>
42 
43 #include "vpdma.h"
44 #include "vpdma_priv.h"
45 #include "vpe_regs.h"
46 #include "sc.h"
47 #include "csc.h"
48 
49 #define VPE_MODULE_NAME "vpe"
50 
51 /* minimum and maximum frame sizes */
52 #define MIN_W		32
53 #define MIN_H		32
54 #define MAX_W		2048
55 #define MAX_H		2048
56 
57 /* required alignments */
58 #define S_ALIGN		0	/* multiple of 1 */
59 #define H_ALIGN		1	/* multiple of 2 */
60 
61 /* flags that indicate a format can be used for capture/output */
62 #define VPE_FMT_TYPE_CAPTURE	(1 << 0)
63 #define VPE_FMT_TYPE_OUTPUT	(1 << 1)
64 
65 /* used as plane indices */
66 #define VPE_MAX_PLANES	2
67 #define VPE_LUMA	0
68 #define VPE_CHROMA	1
69 
70 /* per m2m context info */
71 #define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */
72 
73 #define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */
74 
75 /*
76  * each VPE context may need up to 3 config descriptors, 7 input descriptors,
77  * 3 output descriptors, and 10 control descriptors
78  */
79 #define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
80 					13 * VPDMA_CFD_CTD_DESC_SIZE)
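/*
 * i.e. 10 data transfer descriptors (7 input + 3 output) and 13
 * config/control descriptors (3 config + 10 control), matching the counts
 * in the comment above.
 */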
81 
82 #define vpe_dbg(vpedev, fmt, arg...)	\
83 		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
84 #define vpe_err(vpedev, fmt, arg...)	\
85 		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
86 
87 struct vpe_us_coeffs {
88 	unsigned short	anchor_fid0_c0;
89 	unsigned short	anchor_fid0_c1;
90 	unsigned short	anchor_fid0_c2;
91 	unsigned short	anchor_fid0_c3;
92 	unsigned short	interp_fid0_c0;
93 	unsigned short	interp_fid0_c1;
94 	unsigned short	interp_fid0_c2;
95 	unsigned short	interp_fid0_c3;
96 	unsigned short	anchor_fid1_c0;
97 	unsigned short	anchor_fid1_c1;
98 	unsigned short	anchor_fid1_c2;
99 	unsigned short	anchor_fid1_c3;
100 	unsigned short	interp_fid1_c0;
101 	unsigned short	interp_fid1_c1;
102 	unsigned short	interp_fid1_c2;
103 	unsigned short	interp_fid1_c3;
104 };
105 
106 /*
107  * Default upsampler coefficients
108  */
109 static const struct vpe_us_coeffs us_coeffs[] = {
110 	{
111 		/* Coefficients for progressive input */
112 		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
113 		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
114 	},
115 	{
116 		/* Coefficients for Top Field Interlaced input */
117 		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
118 		/* Coefficients for Bottom Field Interlaced input */
119 		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
120 	},
121 };
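/*
 * Layout note (derived from struct vpe_us_coeffs above): each entry holds
 * 16 coefficients -- four anchor and four interpolation taps for field 0,
 * then the same for field 1. The progressive entry simply repeats one set
 * of eight values for both fields; set_us_coefficients() below skips one
 * whole entry (16 shorts) to select the interlaced set.
 */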
122 
123 /*
124  * the following registers configure some of the parameters of the motion
125  * and edge detection blocks inside DEI; these generally remain the same,
126  * but could be exposed via userspace later if someone needs to tweak them.
127  */
128 struct vpe_dei_regs {
129 	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
130 	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
131 	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
132 	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
133 	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
134 	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
135 };
136 
137 /*
138  * default expert DEI register values, unlikely to be modified.
139  */
140 static const struct vpe_dei_regs dei_regs = {
141 	.mdt_spacial_freq_thr_reg = 0x020C0804u,
142 	.edi_config_reg = 0x0118100Cu,
143 	.edi_lut_reg0 = 0x08040200u,
144 	.edi_lut_reg1 = 0x1010100Cu,
145 	.edi_lut_reg2 = 0x10101010u,
146 	.edi_lut_reg3 = 0x10101010u,
147 };
148 
149 /*
150  * The port_data structure contains per-port data.
151  */
152 struct vpe_port_data {
153 	enum vpdma_channel channel;	/* VPDMA channel */
154 	u8	vb_index;		/* input frame f, f-1, f-2 index */
155 	u8	vb_part;		/* plane index for co-planar formats */
156 };
157 
158 /*
159  * Define indices into the port_data tables
160  */
161 #define VPE_PORT_LUMA1_IN	0
162 #define VPE_PORT_CHROMA1_IN	1
163 #define VPE_PORT_LUMA2_IN	2
164 #define VPE_PORT_CHROMA2_IN	3
165 #define VPE_PORT_LUMA3_IN	4
166 #define VPE_PORT_CHROMA3_IN	5
167 #define VPE_PORT_MV_IN		6
168 #define VPE_PORT_MV_OUT		7
169 #define VPE_PORT_LUMA_OUT	8
170 #define VPE_PORT_CHROMA_OUT	9
171 #define VPE_PORT_RGB_OUT	10
172 
173 static const struct vpe_port_data port_data[11] = {
174 	[VPE_PORT_LUMA1_IN] = {
175 		.channel	= VPE_CHAN_LUMA1_IN,
176 		.vb_index	= 0,
177 		.vb_part	= VPE_LUMA,
178 	},
179 	[VPE_PORT_CHROMA1_IN] = {
180 		.channel	= VPE_CHAN_CHROMA1_IN,
181 		.vb_index	= 0,
182 		.vb_part	= VPE_CHROMA,
183 	},
184 	[VPE_PORT_LUMA2_IN] = {
185 		.channel	= VPE_CHAN_LUMA2_IN,
186 		.vb_index	= 1,
187 		.vb_part	= VPE_LUMA,
188 	},
189 	[VPE_PORT_CHROMA2_IN] = {
190 		.channel	= VPE_CHAN_CHROMA2_IN,
191 		.vb_index	= 1,
192 		.vb_part	= VPE_CHROMA,
193 	},
194 	[VPE_PORT_LUMA3_IN] = {
195 		.channel	= VPE_CHAN_LUMA3_IN,
196 		.vb_index	= 2,
197 		.vb_part	= VPE_LUMA,
198 	},
199 	[VPE_PORT_CHROMA3_IN] = {
200 		.channel	= VPE_CHAN_CHROMA3_IN,
201 		.vb_index	= 2,
202 		.vb_part	= VPE_CHROMA,
203 	},
204 	[VPE_PORT_MV_IN] = {
205 		.channel	= VPE_CHAN_MV_IN,
206 	},
207 	[VPE_PORT_MV_OUT] = {
208 		.channel	= VPE_CHAN_MV_OUT,
209 	},
210 	[VPE_PORT_LUMA_OUT] = {
211 		.channel	= VPE_CHAN_LUMA_OUT,
212 		.vb_part	= VPE_LUMA,
213 	},
214 	[VPE_PORT_CHROMA_OUT] = {
215 		.channel	= VPE_CHAN_CHROMA_OUT,
216 		.vb_part	= VPE_CHROMA,
217 	},
218 	[VPE_PORT_RGB_OUT] = {
219 		.channel	= VPE_CHAN_RGB_OUT,
220 		.vb_part	= VPE_LUMA,
221 	},
222 };
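/*
 * Note on vb_index: for the de-interlacer's three input fields, the
 * LUMA1/CHROMA1 ports read the newest field f (index 0), LUMA2/CHROMA2
 * read f-1 (index 1), and LUMA3/CHROMA3 read the oldest field f-2
 * (index 2), matching how add_in_dtd() indexes ctx->src_vbs[].
 */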
223 
224 
225 /* driver info for each of the supported video formats */
226 struct vpe_fmt {
227 	u32	fourcc;			/* standard format identifier */
228 	u8	types;			/* CAPTURE and/or OUTPUT */
229 	u8	coplanar;		/* set for unpacked Luma and Chroma */
230 	/* vpdma format info for each plane */
231 	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
232 };
233 
234 static struct vpe_fmt vpe_formats[] = {
235 	{
236 		.fourcc		= V4L2_PIX_FMT_NV16,
237 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
238 		.coplanar	= 1,
239 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
240 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
241 				  },
242 	},
243 	{
244 		.fourcc		= V4L2_PIX_FMT_NV12,
245 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
246 		.coplanar	= 1,
247 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
248 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
249 				  },
250 	},
251 	{
252 		.fourcc		= V4L2_PIX_FMT_NV21,
253 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
254 		.coplanar	= 1,
255 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
256 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
257 				  },
258 	},
259 	{
260 		.fourcc		= V4L2_PIX_FMT_YUYV,
261 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
262 		.coplanar	= 0,
263 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
264 				  },
265 	},
266 	{
267 		.fourcc		= V4L2_PIX_FMT_UYVY,
268 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
269 		.coplanar	= 0,
270 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
271 				  },
272 	},
273 	{
274 		.fourcc		= V4L2_PIX_FMT_RGB24,
275 		.types		= VPE_FMT_TYPE_CAPTURE,
276 		.coplanar	= 0,
277 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
278 				  },
279 	},
280 	{
281 		.fourcc		= V4L2_PIX_FMT_RGB32,
282 		.types		= VPE_FMT_TYPE_CAPTURE,
283 		.coplanar	= 0,
284 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
285 				  },
286 	},
287 	{
288 		.fourcc		= V4L2_PIX_FMT_BGR24,
289 		.types		= VPE_FMT_TYPE_CAPTURE,
290 		.coplanar	= 0,
291 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
292 				  },
293 	},
294 	{
295 		.fourcc		= V4L2_PIX_FMT_BGR32,
296 		.types		= VPE_FMT_TYPE_CAPTURE,
297 		.coplanar	= 0,
298 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
299 				  },
300 	},
301 	{
302 		.fourcc		= V4L2_PIX_FMT_RGB565,
303 		.types		= VPE_FMT_TYPE_CAPTURE,
304 		.coplanar	= 0,
305 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
306 				  },
307 	},
308 	{
309 		.fourcc		= V4L2_PIX_FMT_RGB555,
310 		.types		= VPE_FMT_TYPE_CAPTURE,
311 		.coplanar	= 0,
312 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
313 				  },
314 	},
315 };
316 
317 /*
318  * per-queue, driver-specific private data.
319  * there is one source queue and one destination queue for each m2m context.
320  */
321 struct vpe_q_data {
322 	/* current v4l2 format info */
323 	struct v4l2_format	format;
324 	unsigned int		flags;
325 	struct v4l2_rect	c_rect;				/* crop/compose rectangle */
326 	struct vpe_fmt		*fmt;				/* format info */
327 };
328 
329 /* vpe_q_data flag bits */
330 #define	Q_DATA_FRAME_1D			BIT(0)
331 #define	Q_DATA_MODE_TILED		BIT(1)
332 #define	Q_DATA_INTERLACED_ALTERNATE	BIT(2)
333 #define	Q_DATA_INTERLACED_SEQ_TB	BIT(3)
334 #define	Q_DATA_INTERLACED_SEQ_BT	BIT(4)
335 
336 #define Q_IS_SEQ_XX		(Q_DATA_INTERLACED_SEQ_TB | \
337 				Q_DATA_INTERLACED_SEQ_BT)
338 
339 #define Q_IS_INTERLACED		(Q_DATA_INTERLACED_ALTERNATE | \
340 				Q_DATA_INTERLACED_SEQ_TB | \
341 				Q_DATA_INTERLACED_SEQ_BT)
342 
343 enum {
344 	Q_DATA_SRC = 0,
345 	Q_DATA_DST = 1,
346 };
347 
348 /* find our format description corresponding to the passed v4l2_format */
349 static struct vpe_fmt *__find_format(u32 fourcc)
350 {
351 	struct vpe_fmt *fmt;
352 	unsigned int k;
353 
354 	for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
355 		fmt = &vpe_formats[k];
356 		if (fmt->fourcc == fourcc)
357 			return fmt;
358 	}
359 
360 	return NULL;
361 }
362 
363 static struct vpe_fmt *find_format(struct v4l2_format *f)
364 {
365 	return __find_format(f->fmt.pix.pixelformat);
366 }
367 
368 /*
369  * there is one vpe_dev structure in the driver, it is shared by
370  * all instances.
371  */
372 struct vpe_dev {
373 	struct v4l2_device	v4l2_dev;
374 	struct video_device	vfd;
375 	struct v4l2_m2m_dev	*m2m_dev;
376 
377 	atomic_t		num_instances;	/* count of driver instances */
378 	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
379 	struct mutex		dev_mutex;
380 	spinlock_t		lock;
381 
382 	int			irq;
383 	void __iomem		*base;
384 	struct resource		*res;
385 
386 	struct vpdma_data	vpdma_data;
387 	struct vpdma_data	*vpdma;		/* vpdma data handle */
388 	struct sc_data		*sc;		/* scaler data handle */
389 	struct csc_data		*csc;		/* csc data handle */
390 };
391 
392 /*
393  * There is one vpe_ctx structure for each m2m context.
394  */
395 struct vpe_ctx {
396 	struct v4l2_fh		fh;
397 	struct vpe_dev		*dev;
398 	struct v4l2_ctrl_handler hdl;
399 
400 	unsigned int		field;			/* current field */
401 	unsigned int		sequence;		/* current frame/field seq */
402 	unsigned int		aborting;		/* abort after next irq */
403 
404 	unsigned int		bufs_per_job;		/* input buffers per batch */
405 	unsigned int		bufs_completed;		/* bufs done in this batch */
406 
407 	struct vpe_q_data	q_data[2];		/* src & dst queue data */
408 	struct vb2_v4l2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];
409 	struct vb2_v4l2_buffer	*dst_vb;
410 
411 	dma_addr_t		mv_buf_dma[2];		/* dma addrs of motion vector in/out bufs */
412 	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
413 	size_t			mv_buf_size;		/* current motion vector buffer size */
414 	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
415 	struct vpdma_buf	sc_coeff_h;		/* h coeff buffer */
416 	struct vpdma_buf	sc_coeff_v;		/* v coeff buffer */
417 	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */
418 
419 	bool			deinterlacing;		/* using de-interlacer */
420 	bool			load_mmrs;		/* have new shadow reg values */
421 
422 	unsigned int		src_mv_buf_selector;
423 };
424 
425 
426 /*
427  * M2M devices get 2 queues.
428  * Return the queue given the type.
429  */
430 static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
431 				     enum v4l2_buf_type type)
432 {
433 	switch (type) {
434 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
435 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
436 		return &ctx->q_data[Q_DATA_SRC];
437 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
438 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
439 		return &ctx->q_data[Q_DATA_DST];
440 	default:
441 		return NULL;
442 	}
443 	return NULL;
444 }
445 
446 static u32 read_reg(struct vpe_dev *dev, int offset)
447 {
448 	return ioread32(dev->base + offset);
449 }
450 
451 static void write_reg(struct vpe_dev *dev, int offset, u32 value)
452 {
453 	iowrite32(value, dev->base + offset);
454 }
455 
456 /* register field read/write helpers */
457 static int get_field(u32 value, u32 mask, int shift)
458 {
459 	return (value & (mask << shift)) >> shift;
460 }
461 
462 static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
463 {
464 	return get_field(read_reg(dev, offset), mask, shift);
465 }
466 
467 static void write_field(u32 *valp, u32 field, u32 mask, int shift)
468 {
469 	u32 val = *valp;
470 
471 	val &= ~(mask << shift);
472 	val |= (field & mask) << shift;
473 	*valp = val;
474 }
475 
476 static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
477 		u32 mask, int shift)
478 {
479 	u32 val = read_reg(dev, offset);
480 
481 	write_field(&val, field, mask, shift);
482 
483 	write_reg(dev, offset, val);
484 }
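/*
 * Usage sketch for the helpers above, with a hypothetical 2-bit field at
 * bits [5:4] (mask 0x3, shift 4):
 *
 *	u32 val = 0;
 *	write_field(&val, 0x2, 0x3, 4);	-- val is now 0x20
 *	get_field(val, 0x3, 4);		-- returns 0x2
 *
 * write_field_reg() performs the same read-modify-write directly on the
 * VPE register at the given offset.
 */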
485 
486 /*
487  * DMA address/data block for the shadow registers
488  */
489 struct vpe_mmr_adb {
490 	struct vpdma_adb_hdr	out_fmt_hdr;
491 	u32			out_fmt_reg[1];
492 	u32			out_fmt_pad[3];
493 	struct vpdma_adb_hdr	us1_hdr;
494 	u32			us1_regs[8];
495 	struct vpdma_adb_hdr	us2_hdr;
496 	u32			us2_regs[8];
497 	struct vpdma_adb_hdr	us3_hdr;
498 	u32			us3_regs[8];
499 	struct vpdma_adb_hdr	dei_hdr;
500 	u32			dei_regs[8];
501 	struct vpdma_adb_hdr	sc_hdr0;
502 	u32			sc_regs0[7];
503 	u32			sc_pad0[1];
504 	struct vpdma_adb_hdr	sc_hdr8;
505 	u32			sc_regs8[6];
506 	u32			sc_pad8[2];
507 	struct vpdma_adb_hdr	sc_hdr17;
508 	u32			sc_regs17[9];
509 	u32			sc_pad17[3];
510 	struct vpdma_adb_hdr	csc_hdr;
511 	u32			csc_regs[6];
512 	u32			csc_pad[2];
513 };
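/*
 * The pad arrays keep each header + payload group a multiple of 16 bytes,
 * which the VPDMA configuration descriptor payload presumably requires;
 * each vpdma_adb_hdr describes the block of register values that
 * immediately follows it.
 */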
514 
515 #define GET_OFFSET_TOP(ctx, obj, reg)	\
516 	((obj)->res->start - (ctx)->dev->res->start + (reg))
517 
518 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
519 	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
520 /*
521  * Set the headers for all of the address/data block structures.
522  */
523 static void init_adb_hdrs(struct vpe_ctx *ctx)
524 {
525 	VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
526 	VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
527 	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
528 	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
529 	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
530 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
531 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
532 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
533 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
534 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
535 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
536 	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
537 		GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
538 }
539 
540 /*
541  * Allocate or re-allocate the motion vector DMA buffers
542  * There are two buffers, one for input and one for output.
543  * However, the roles are reversed after each field is processed.
544  * In other words, after each field is processed, the previous
545  * output (dst) MV buffer becomes the new input (src) MV buffer.
546  */
547 static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
548 {
549 	struct device *dev = ctx->dev->v4l2_dev.dev;
550 
551 	if (ctx->mv_buf_size == size)
552 		return 0;
553 
554 	if (ctx->mv_buf[0])
555 		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
556 			ctx->mv_buf_dma[0]);
557 
558 	if (ctx->mv_buf[1])
559 		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
560 			ctx->mv_buf_dma[1]);
561 
562 	if (size == 0)
563 		return 0;
564 
565 	ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
566 				GFP_KERNEL);
567 	if (!ctx->mv_buf[0]) {
568 		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
569 		return -ENOMEM;
570 	}
571 
572 	ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
573 				GFP_KERNEL);
574 	if (!ctx->mv_buf[1]) {
575 		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
576 		dma_free_coherent(dev, size, ctx->mv_buf[0],
577 			ctx->mv_buf_dma[0]);
578 
579 		return -ENOMEM;
580 	}
581 
582 	ctx->mv_buf_size = size;
583 	ctx->src_mv_buf_selector = 0;
584 
585 	return 0;
586 }
587 
588 static void free_mv_buffers(struct vpe_ctx *ctx)
589 {
590 	realloc_mv_buffers(ctx, 0);
591 }
592 
593 /*
594  * While de-interlacing, we keep the two most recent input buffers
595  * around.  This function frees those two buffers when we have
596  * finished processing the current stream.
597  */
598 static void free_vbs(struct vpe_ctx *ctx)
599 {
600 	struct vpe_dev *dev = ctx->dev;
601 	unsigned long flags;
602 
603 	if (ctx->src_vbs[2] == NULL)
604 		return;
605 
606 	spin_lock_irqsave(&dev->lock, flags);
607 	if (ctx->src_vbs[2]) {
608 		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
609 		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
610 			v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
611 		ctx->src_vbs[2] = NULL;
612 		ctx->src_vbs[1] = NULL;
613 	}
614 	spin_unlock_irqrestore(&dev->lock, flags);
615 }
616 
617 /*
618  * Enable or disable the VPE clocks
619  */
620 static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
621 {
622 	u32 val = 0;
623 
624 	if (on)
625 		val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
626 	write_reg(dev, VPE_CLK_ENABLE, val);
627 }
628 
629 static void vpe_top_reset(struct vpe_dev *dev)
630 {
631 
632 	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
633 		VPE_DATA_PATH_CLK_RESET_SHIFT);
634 
635 	usleep_range(100, 150);
636 
637 	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
638 		VPE_DATA_PATH_CLK_RESET_SHIFT);
639 }
640 
641 static void vpe_top_vpdma_reset(struct vpe_dev *dev)
642 {
643 	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
644 		VPE_VPDMA_CLK_RESET_SHIFT);
645 
646 	usleep_range(100, 150);
647 
648 	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
649 		VPE_VPDMA_CLK_RESET_SHIFT);
650 }
651 
652 /*
653  * Load the correct set of upsampler coefficients into the shadow MMRs
654  */
655 static void set_us_coefficients(struct vpe_ctx *ctx)
656 {
657 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
658 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
659 	u32 *us1_reg = &mmr_adb->us1_regs[0];
660 	u32 *us2_reg = &mmr_adb->us2_regs[0];
661 	u32 *us3_reg = &mmr_adb->us3_regs[0];
662 	const unsigned short *cp, *end_cp;
663 
664 	cp = &us_coeffs[0].anchor_fid0_c0;
665 
666 	if (s_q_data->flags & Q_IS_INTERLACED)		/* interlaced */
667 		cp += sizeof(us_coeffs[0]) / sizeof(*cp);
668 
669 	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
670 
671 	while (cp < end_cp) {
672 		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
673 		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
674 		*us2_reg++ = *us1_reg;
675 		*us3_reg++ = *us1_reg++;
676 	}
677 	ctx->load_mmrs = true;
678 }
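/*
 * Packing note: the loop above consumes the 16 coefficients two at a time,
 * packing each C0/C1 pair into one of eight consecutive shadow registers,
 * and mirrors every word into the US2 and US3 blocks so that all three
 * upsamplers run with identical coefficients.
 */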
679 
680 /*
681  * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
682  */
683 static void set_cfg_modes(struct vpe_ctx *ctx)
684 {
685 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
686 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
687 	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
688 	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
689 	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
690 	int cfg_mode = 1;
691 
692 	/*
693 	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
694 	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
695 	 */
696 
697 	if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
698 	    fmt->fourcc == V4L2_PIX_FMT_NV21)
699 		cfg_mode = 0;
700 
701 	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
702 	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
703 	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
704 
705 	ctx->load_mmrs = true;
706 }
707 
708 static void set_line_modes(struct vpe_ctx *ctx)
709 {
710 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
711 	int line_mode = 1;
712 
713 	if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
714 	    fmt->fourcc == V4L2_PIX_FMT_NV21)
715 		line_mode = 0;		/* double lines to line buffer */
716 
717 	/* regs for now */
718 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
719 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
720 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
721 
722 	/* frame start for input luma */
723 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
724 		VPE_CHAN_LUMA1_IN);
725 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
726 		VPE_CHAN_LUMA2_IN);
727 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
728 		VPE_CHAN_LUMA3_IN);
729 
730 	/* frame start for input chroma */
731 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
732 		VPE_CHAN_CHROMA1_IN);
733 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
734 		VPE_CHAN_CHROMA2_IN);
735 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
736 		VPE_CHAN_CHROMA3_IN);
737 
738 	/* frame start for MV in client */
739 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
740 		VPE_CHAN_MV_IN);
741 }
742 
743 /*
744  * Set the shadow registers that are modified when the source
745  * format changes.
746  */
747 static void set_src_registers(struct vpe_ctx *ctx)
748 {
749 	set_us_coefficients(ctx);
750 }
751 
752 /*
753  * Set the shadow registers that are modified when the destination
754  * format changes.
755  */
756 static void set_dst_registers(struct vpe_ctx *ctx)
757 {
758 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
759 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
760 	const struct v4l2_format_info *finfo;
761 	u32 val = 0;
762 
763 	finfo = v4l2_format_info(fmt->fourcc);
764 	if (v4l2_is_format_rgb(finfo)) {
765 		val |= VPE_RGB_OUT_SELECT;
766 		vpdma_set_bg_color(ctx->dev->vpdma,
767 			(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
768 	} else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
769 		val |= VPE_COLOR_SEPARATE_422;
770 
771 	/*
772 	 * the source of CHR_DS and CSC is always the scaler, irrespective of
773 	 * whether it's used or not
774 	 */
775 	val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
776 
777 	if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
778 	    fmt->fourcc != V4L2_PIX_FMT_NV21)
779 		val |= VPE_DS_BYPASS;
780 
781 	mmr_adb->out_fmt_reg[0] = val;
782 
783 	ctx->load_mmrs = true;
784 }
785 
786 /*
787  * Set the de-interlacer shadow register values
788  */
789 static void set_dei_regs(struct vpe_ctx *ctx)
790 {
791 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
792 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
793 	unsigned int src_h = s_q_data->c_rect.height;
794 	unsigned int src_w = s_q_data->c_rect.width;
795 	u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
796 	bool deinterlace = true;
797 	u32 val = 0;
798 
799 	/*
800 	 * according to the TRM, we should set DEI in progressive bypass mode
801 	 * when the input content is progressive; however, DEI is bypassed
802 	 * correctly for both progressive and interlaced content in interlace
803 	 * bypass mode, so it is recommended not to use progressive bypass mode.
804 	 */
805 	if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
806 		deinterlace = false;
807 		val = VPE_DEI_INTERLACE_BYPASS;
808 	}
809 
810 	src_h = deinterlace ? src_h * 2 : src_h;
811 
812 	val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
813 		(src_w << VPE_DEI_WIDTH_SHIFT) |
814 		VPE_DEI_FIELD_FLUSH;
815 
816 	*dei_mmr0 = val;
817 
818 	ctx->load_mmrs = true;
819 }
820 
821 static void set_dei_shadow_registers(struct vpe_ctx *ctx)
822 {
823 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
824 	u32 *dei_mmr = &mmr_adb->dei_regs[0];
825 	const struct vpe_dei_regs *cur = &dei_regs;
826 
827 	dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
828 	dei_mmr[3]  = cur->edi_config_reg;
829 	dei_mmr[4]  = cur->edi_lut_reg0;
830 	dei_mmr[5]  = cur->edi_lut_reg1;
831 	dei_mmr[6]  = cur->edi_lut_reg2;
832 	dei_mmr[7]  = cur->edi_lut_reg3;
833 
834 	ctx->load_mmrs = true;
835 }
836 
837 static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
838 {
839 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
840 	u32 *edi_config_reg = &mmr_adb->dei_regs[3];
841 
842 	if (mode & 0x2)
843 		write_field(edi_config_reg, 1, 1, 2);	/* EDI_ENABLE_3D */
844 
845 	if (mode & 0x3)
846 		write_field(edi_config_reg, 1, 1, 3);	/* EDI_CHROMA_3D  */
847 
848 	write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
849 		VPE_EDI_INP_MODE_SHIFT);
850 
851 	ctx->load_mmrs = true;
852 }
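/*
 * Mode usage in this driver: the first two output frames are produced
 * through line average, after which device_run() calls this with 0x3 to
 * enable edge-directed interpolation for both luma and chroma; the 3D
 * enable bits are set above for the modes that use the motion vector
 * input.
 */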
853 
854 /*
855  * Set the shadow registers whose values are modified when either the
856  * source or destination format is changed.
857  */
858 static int set_srcdst_params(struct vpe_ctx *ctx)
859 {
860 	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
861 	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
862 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
863 	unsigned int src_w = s_q_data->c_rect.width;
864 	unsigned int src_h = s_q_data->c_rect.height;
865 	unsigned int dst_w = d_q_data->c_rect.width;
866 	unsigned int dst_h = d_q_data->c_rect.height;
867 	struct v4l2_pix_format_mplane *spix;
868 	size_t mv_buf_size;
869 	int ret;
870 
871 	ctx->sequence = 0;
872 	ctx->field = V4L2_FIELD_TOP;
873 	spix = &s_q_data->format.fmt.pix_mp;
874 
875 	if ((s_q_data->flags & Q_IS_INTERLACED) &&
876 			!(d_q_data->flags & Q_IS_INTERLACED)) {
877 		int bytes_per_line;
878 		const struct vpdma_data_format *mv =
879 			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
880 
881 		/*
882 		 * we make sure that the source image has a 16 byte aligned
883 		 * stride; we need to do the same for the motion vector buffer
884 		 * by aligning its stride to the next 16 byte boundary. This
885 		 * extra space will not be used by the de-interlacer, but will
886 		 * ensure that vpdma operates correctly
887 		 */
888 		bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
889 				       VPDMA_STRIDE_ALIGN);
890 		mv_buf_size = bytes_per_line * spix->height;
891 
892 		ctx->deinterlacing = true;
893 		src_h <<= 1;
894 	} else {
895 		ctx->deinterlacing = false;
896 		mv_buf_size = 0;
897 	}
898 
899 	free_vbs(ctx);
900 	ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
901 
902 	ret = realloc_mv_buffers(ctx, mv_buf_size);
903 	if (ret)
904 		return ret;
905 
906 	set_cfg_modes(ctx);
907 	set_dei_regs(ctx);
908 
909 	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
910 		      &s_q_data->format, &d_q_data->format);
911 
912 	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
913 	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
914 
915 	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
916 		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
917 		src_w, src_h, dst_w, dst_h);
918 
919 	return 0;
920 }
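/*
 * Worked example for the motion vector sizing above, assuming the MV
 * format is 4 bits per pixel: for a 1920x1080 interlaced source,
 * bytes_per_line = ALIGN((1920 * 4) >> 3, 16) = 960, so each MV buffer is
 * 960 * 1080 = 1036800 bytes.
 */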
921 
922 /*
923  * mem2mem callbacks
924  */
925 
926 /*
927  * job_ready() - check whether an instance is ready to be scheduled to run
928  */
929 static int job_ready(void *priv)
930 {
931 	struct vpe_ctx *ctx = priv;
932 
933 	/*
934 	 * This check is needed as this might be called directly from the
935 	 * driver. When called by the m2m framework it will always be satisfied,
936 	 * but when called from vpe_irq it might fail (src stream with zero buffers ready).
937 	 */
938 	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
939 		v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
940 		return 0;
941 
942 	return 1;
943 }
944 
945 static void job_abort(void *priv)
946 {
947 	struct vpe_ctx *ctx = priv;
948 
949 	/* Will cancel the transaction in the next interrupt handler */
950 	ctx->aborting = 1;
951 }
952 
953 static void vpe_dump_regs(struct vpe_dev *dev)
954 {
955 #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
956 
957 	vpe_dbg(dev, "VPE Registers:\n");
958 
959 	DUMPREG(PID);
960 	DUMPREG(SYSCONFIG);
961 	DUMPREG(INT0_STATUS0_RAW);
962 	DUMPREG(INT0_STATUS0);
963 	DUMPREG(INT0_ENABLE0);
964 	DUMPREG(INT0_STATUS1_RAW);
965 	DUMPREG(INT0_STATUS1);
966 	DUMPREG(INT0_ENABLE1);
967 	DUMPREG(CLK_ENABLE);
968 	DUMPREG(CLK_RESET);
969 	DUMPREG(CLK_FORMAT_SELECT);
970 	DUMPREG(CLK_RANGE_MAP);
971 	DUMPREG(US1_R0);
972 	DUMPREG(US1_R1);
973 	DUMPREG(US1_R2);
974 	DUMPREG(US1_R3);
975 	DUMPREG(US1_R4);
976 	DUMPREG(US1_R5);
977 	DUMPREG(US1_R6);
978 	DUMPREG(US1_R7);
979 	DUMPREG(US2_R0);
980 	DUMPREG(US2_R1);
981 	DUMPREG(US2_R2);
982 	DUMPREG(US2_R3);
983 	DUMPREG(US2_R4);
984 	DUMPREG(US2_R5);
985 	DUMPREG(US2_R6);
986 	DUMPREG(US2_R7);
987 	DUMPREG(US3_R0);
988 	DUMPREG(US3_R1);
989 	DUMPREG(US3_R2);
990 	DUMPREG(US3_R3);
991 	DUMPREG(US3_R4);
992 	DUMPREG(US3_R5);
993 	DUMPREG(US3_R6);
994 	DUMPREG(US3_R7);
995 	DUMPREG(DEI_FRAME_SIZE);
996 	DUMPREG(MDT_BYPASS);
997 	DUMPREG(MDT_SF_THRESHOLD);
998 	DUMPREG(EDI_CONFIG);
999 	DUMPREG(DEI_EDI_LUT_R0);
1000 	DUMPREG(DEI_EDI_LUT_R1);
1001 	DUMPREG(DEI_EDI_LUT_R2);
1002 	DUMPREG(DEI_EDI_LUT_R3);
1003 	DUMPREG(DEI_FMD_WINDOW_R0);
1004 	DUMPREG(DEI_FMD_WINDOW_R1);
1005 	DUMPREG(DEI_FMD_CONTROL_R0);
1006 	DUMPREG(DEI_FMD_CONTROL_R1);
1007 	DUMPREG(DEI_FMD_STATUS_R0);
1008 	DUMPREG(DEI_FMD_STATUS_R1);
1009 	DUMPREG(DEI_FMD_STATUS_R2);
1010 #undef DUMPREG
1011 
1012 	sc_dump_regs(dev->sc);
1013 	csc_dump_regs(dev->csc);
1014 }
1015 
1016 static void add_out_dtd(struct vpe_ctx *ctx, int port)
1017 {
1018 	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
1019 	const struct vpe_port_data *p_data = &port_data[port];
1020 	struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
1021 	struct vpe_fmt *fmt = q_data->fmt;
1022 	const struct vpdma_data_format *vpdma_fmt;
1023 	int mv_buf_selector = !ctx->src_mv_buf_selector;
1024 	struct v4l2_pix_format_mplane *pix;
1025 	dma_addr_t dma_addr;
1026 	u32 flags = 0;
1027 	u32 offset = 0;
1028 	u32 stride;
1029 
1030 	if (port == VPE_PORT_MV_OUT) {
1031 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1032 		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1033 		q_data = &ctx->q_data[Q_DATA_SRC];
1034 		pix = &q_data->format.fmt.pix_mp;
1035 		stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
1036 			       VPDMA_STRIDE_ALIGN);
1037 	} else {
1038 		/* to incorporate interleaved formats */
1039 		int plane = fmt->coplanar ? p_data->vb_part : 0;
1040 
1041 		pix = &q_data->format.fmt.pix_mp;
1042 		vpdma_fmt = fmt->vpdma_fmt[plane];
1043 		/*
1044 		 * If we are using a single plane buffer but need to feed a
1045 		 * separate vpdma chroma channel, offset into the same buffer.
1046 		 */
1047 		if (pix->num_planes == 1 && plane) {
1048 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1049 			/* Compute required offset */
1050 			offset = pix->plane_fmt[0].bytesperline * pix->height;
1051 		} else {
1052 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1053 			/* Use address as is, no offset */
1054 			offset = 0;
1055 		}
1056 		if (!dma_addr) {
1057 			vpe_err(ctx->dev,
1058 				"acquiring output buffer(%d) dma_addr failed\n",
1059 				port);
1060 			return;
1061 		}
1062 		/* Apply the offset */
1063 		dma_addr += offset;
1064 		stride = pix->plane_fmt[VPE_LUMA].bytesperline;
1065 	}
1066 
1067 	if (q_data->flags & Q_DATA_FRAME_1D)
1068 		flags |= VPDMA_DATA_FRAME_1D;
1069 	if (q_data->flags & Q_DATA_MODE_TILED)
1070 		flags |= VPDMA_DATA_MODE_TILED;
1071 
1072 	vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
1073 			   MAX_W, MAX_H);
1074 
1075 	vpdma_add_out_dtd(&ctx->desc_list, pix->width,
1076 			  stride, &q_data->c_rect,
1077 			  vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
1078 			  MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
1079 }
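/*
 * Offset example for the single-plane co-planar case above: for a
 * 1920x1080 NV12 buffer with bytesperline = 1920, the chroma channel is
 * pointed at dma_addr + 1920 * 1080, i.e. just past the luma plane.
 */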
1080 
1081 static void add_in_dtd(struct vpe_ctx *ctx, int port)
1082 {
1083 	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
1084 	const struct vpe_port_data *p_data = &port_data[port];
1085 	struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
1086 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1087 	struct vpe_fmt *fmt = q_data->fmt;
1088 	struct v4l2_pix_format_mplane *pix;
1089 	const struct vpdma_data_format *vpdma_fmt;
1090 	int mv_buf_selector = ctx->src_mv_buf_selector;
1091 	int field = vbuf->field == V4L2_FIELD_BOTTOM;
1092 	int frame_width, frame_height;
1093 	dma_addr_t dma_addr;
1094 	u32 flags = 0;
1095 	u32 offset = 0;
1096 	u32 stride;
1097 
1098 	pix = &q_data->format.fmt.pix_mp;
1099 	if (port == VPE_PORT_MV_IN) {
1100 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1101 		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1102 		stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
1103 			       VPDMA_STRIDE_ALIGN);
1104 	} else {
1105 		/* to incorporate interleaved formats */
1106 		int plane = fmt->coplanar ? p_data->vb_part : 0;
1107 
1108 		vpdma_fmt = fmt->vpdma_fmt[plane];
1109 		/*
1110 		 * If we are using a single plane buffer but need to feed a
1111 		 * separate vpdma chroma channel, offset into the same buffer.
1112 		 */
1113 		if (pix->num_planes == 1 && plane) {
1114 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1115 			/* Compute required offset */
1116 			offset = pix->plane_fmt[0].bytesperline * pix->height;
1117 		} else {
1118 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1119 			/* Use address as is, no offset */
1120 			offset = 0;
1121 		}
1122 		if (!dma_addr) {
1123 			vpe_err(ctx->dev,
1124 				"acquiring input buffer(%d) dma_addr failed\n",
1125 				port);
1126 			return;
1127 		}
1128 		/* Apply the offset */
1129 		dma_addr += offset;
1130 		stride = pix->plane_fmt[VPE_LUMA].bytesperline;
1131 
1132 		/*
1133 		 * field used in VPDMA desc = 0 (top) / 1 (bottom)
1134 		 * Use the top or bottom field from the same vb alternately.
1135 		 * For each de-interlacing operation, f, f-1, f-2 should form
1136 		 * either TBT or BTB
1137 		 */
1138 		if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
1139 		    q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
1140 			/* Select initial value based on format */
1141 			if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
1142 				field = 1;
1143 			else
1144 				field = 0;
1145 
1146 			/* Toggle for each vb_index and each operation */
1147 			field = (field + p_data->vb_index + ctx->sequence) % 2;
1148 
1149 			if (field) {
1150 				int height = pix->height / 2;
1151 				int bpp;
1152 
1153 				if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
1154 				    fmt->fourcc == V4L2_PIX_FMT_NV21)
1155 					bpp = 1;
1156 				else
1157 					bpp = vpdma_fmt->depth >> 3;
1158 
1159 				if (plane)
1160 					height /= 2;
1161 
1162 				dma_addr += pix->width * height * bpp;
1163 			}
1164 		}
1165 	}
1166 
1167 	if (q_data->flags & Q_DATA_FRAME_1D)
1168 		flags |= VPDMA_DATA_FRAME_1D;
1169 	if (q_data->flags & Q_DATA_MODE_TILED)
1170 		flags |= VPDMA_DATA_MODE_TILED;
1171 
1172 	frame_width = q_data->c_rect.width;
1173 	frame_height = q_data->c_rect.height;
1174 
1175 	if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
1176 				fmt->fourcc == V4L2_PIX_FMT_NV21))
1177 		frame_height /= 2;
1178 
1179 	vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
1180 			 &q_data->c_rect, vpdma_fmt, dma_addr,
1181 			 p_data->channel, field, flags, frame_width,
1182 			 frame_height, 0, 0);
1183 }
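/*
 * Field selection example for the SEQ_TB case above: the initial field is
 * 0 and field = (0 + vb_index + sequence) % 2, so for sequence 0 the
 * inputs f, f-1, f-2 (vb_index 0, 1, 2) pick top/bottom/top (TBT), and
 * for sequence 1 they pick BTB -- one of the two patterns the comment in
 * add_in_dtd() requires.
 */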
1184 
1185 /*
1186  * Enable the expected IRQ sources
1187  */
1188 static void enable_irqs(struct vpe_ctx *ctx)
1189 {
1190 	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
1191 	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
1192 				VPE_DS1_UV_ERROR_INT);
1193 
1194 	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
1195 }
1196 
1197 static void disable_irqs(struct vpe_ctx *ctx)
1198 {
1199 	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
1200 	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
1201 
1202 	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
1203 }
1204 
1205 /* device_run() - prepares and starts the device
1206  *
1207  * This function is only called when both the source and destination
1208  * buffers are in place.
1209  */
1210 static void device_run(void *priv)
1211 {
1212 	struct vpe_ctx *ctx = priv;
1213 	struct sc_data *sc = ctx->dev->sc;
1214 	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
1215 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
1216 	const struct v4l2_format_info *d_finfo;
1217 
1218 	d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
1219 
1220 	if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
1221 	    ctx->sequence % 2 == 0) {
1222 		/* When using SEQ_XX type buffers, each buffer has two
1223 		 * fields (top & bottom), so removing one buffer actually
1224 		 * yields two fields.
1225 		 * Alternate between two operations:
1226 		 * Even: consume one field but DO NOT REMOVE from the queue
1227 		 * Odd: consume the other field and REMOVE it from the queue
1228 		 */
1229 		ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1230 		WARN_ON(ctx->src_vbs[0] == NULL);
1231 	} else {
1232 		ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1233 		WARN_ON(ctx->src_vbs[0] == NULL);
1234 	}
1235 
1236 	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1237 	WARN_ON(ctx->dst_vb == NULL);
1238 
1239 	if (ctx->deinterlacing) {
1240 
1241 		if (ctx->src_vbs[2] == NULL) {
1242 			ctx->src_vbs[2] = ctx->src_vbs[0];
1243 			WARN_ON(ctx->src_vbs[2] == NULL);
1244 			ctx->src_vbs[1] = ctx->src_vbs[0];
1245 			WARN_ON(ctx->src_vbs[1] == NULL);
1246 		}
1247 
1248 		/*
1249 		 * we have output the first 2 frames through line average; we
1250 		 * now switch to the EDI de-interlacer
1251 		 */
1252 		if (ctx->sequence == 2)
1253 			config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
1254 	}
1255 
1256 	/* config descriptors */
1257 	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
1258 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
1259 		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
1260 
1261 		set_line_modes(ctx);
1262 
1263 		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
1264 		ctx->load_mmrs = false;
1265 	}
1266 
1267 	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
1268 			sc->load_coeff_h) {
1269 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
1270 		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1271 			&ctx->sc_coeff_h, 0);
1272 
1273 		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
1274 		sc->load_coeff_h = false;
1275 	}
1276 
1277 	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
1278 			sc->load_coeff_v) {
1279 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
1280 		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1281 			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
1282 
1283 		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
1284 		sc->load_coeff_v = false;
1285 	}
1286 
1287 	/* output data descriptors */
1288 	if (ctx->deinterlacing)
1289 		add_out_dtd(ctx, VPE_PORT_MV_OUT);
1290 
1291 	if (v4l2_is_format_rgb(d_finfo)) {
1292 		add_out_dtd(ctx, VPE_PORT_RGB_OUT);
1293 	} else {
1294 		add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
1295 		if (d_q_data->fmt->coplanar)
1296 			add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
1297 	}
1298 
1299 	/* input data descriptors */
1300 	if (ctx->deinterlacing) {
1301 		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
1302 		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
1303 
1304 		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
1305 		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
1306 	}
1307 
1308 	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
1309 	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
1310 
1311 	if (ctx->deinterlacing)
1312 		add_in_dtd(ctx, VPE_PORT_MV_IN);
1313 
1314 	/* sync on channel control descriptors for input ports */
1315 	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
1316 	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
1317 
1318 	if (ctx->deinterlacing) {
1319 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1320 			VPE_CHAN_LUMA2_IN);
1321 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1322 			VPE_CHAN_CHROMA2_IN);
1323 
1324 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1325 			VPE_CHAN_LUMA3_IN);
1326 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1327 			VPE_CHAN_CHROMA3_IN);
1328 
1329 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
1330 	}
1331 
1332 	/* sync on channel control descriptors for output ports */
1333 	if (v4l2_is_format_rgb(d_finfo)) {
1334 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1335 			VPE_CHAN_RGB_OUT);
1336 	} else {
1337 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1338 			VPE_CHAN_LUMA_OUT);
1339 		if (d_q_data->fmt->coplanar)
1340 			vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1341 				VPE_CHAN_CHROMA_OUT);
1342 	}
1343 
1344 	if (ctx->deinterlacing)
1345 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
1346 
1347 	enable_irqs(ctx);
1348 
1349 	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
1350 	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
1351 }
1352 
1353 static void dei_error(struct vpe_ctx *ctx)
1354 {
1355 	dev_warn(ctx->dev->v4l2_dev.dev,
1356 		"received DEI error interrupt\n");
1357 }
1358 
1359 static void ds1_uv_error(struct vpe_ctx *ctx)
1360 {
1361 	dev_warn(ctx->dev->v4l2_dev.dev,
1362 		"received downsampler error interrupt\n");
1363 }
1364 
1365 static irqreturn_t vpe_irq(int irq_vpe, void *data)
1366 {
1367 	struct vpe_dev *dev = (struct vpe_dev *)data;
1368 	struct vpe_ctx *ctx;
1369 	struct vpe_q_data *d_q_data;
1370 	struct vb2_v4l2_buffer *s_vb, *d_vb;
1371 	unsigned long flags;
1372 	u32 irqst0, irqst1;
1373 	bool list_complete = false;
1374 
1375 	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
1376 	if (irqst0) {
1377 		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
1378 		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
1379 	}
1380 
1381 	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
1382 	if (irqst1) {
1383 		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
1384 		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
1385 	}
1386 
1387 	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
1388 	if (!ctx) {
1389 		vpe_err(dev, "instance released before end of transaction\n");
1390 		goto handled;
1391 	}
1392 
1393 	if (irqst1) {
1394 		if (irqst1 & VPE_DEI_ERROR_INT) {
1395 			irqst1 &= ~VPE_DEI_ERROR_INT;
1396 			dei_error(ctx);
1397 		}
1398 		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
1399 			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
1400 			ds1_uv_error(ctx);
1401 		}
1402 	}
1403 
1404 	if (irqst0) {
1405 		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
1406 			vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
1407 
1408 		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
1409 		list_complete = true;
1410 	}
1411 
1412 	if (irqst0 | irqst1) {
1413 		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
1414 			irqst0, irqst1);
1415 	}
1416 
1417 	/*
1418 	 * Set up the next operation only when the list complete IRQ occurs;
1419 	 * otherwise, skip the following code
1420 	 */
1421 	if (!list_complete)
1422 		goto handled;
1423 
1424 	disable_irqs(ctx);
1425 
1426 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
1427 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
1428 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
1429 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
1430 
1431 	vpdma_reset_desc_list(&ctx->desc_list);
1432 
1433 	 /* the previous dst mv buffer becomes the next src mv buffer */
1434 	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
1435 
1436 	s_vb = ctx->src_vbs[0];
1437 	d_vb = ctx->dst_vb;
1438 
1439 	d_vb->flags = s_vb->flags;
1440 	d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;
1441 
1442 	if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1443 		d_vb->timecode = s_vb->timecode;
1444 
1445 	d_vb->sequence = ctx->sequence;
1446 	s_vb->sequence = ctx->sequence;
1447 
1448 	d_q_data = &ctx->q_data[Q_DATA_DST];
1449 	if (d_q_data->flags & Q_IS_INTERLACED) {
1450 		d_vb->field = ctx->field;
1451 		if (ctx->field == V4L2_FIELD_BOTTOM) {
1452 			ctx->sequence++;
1453 			ctx->field = V4L2_FIELD_TOP;
1454 		} else {
1455 			WARN_ON(ctx->field != V4L2_FIELD_TOP);
1456 			ctx->field = V4L2_FIELD_BOTTOM;
1457 		}
1458 	} else {
1459 		d_vb->field = V4L2_FIELD_NONE;
1460 		ctx->sequence++;
1461 	}
1462 
1463 	if (ctx->deinterlacing) {
1464 		/*
1465 		 * Allow the source buffer to be dequeued only if it won't be
1466 		 * used in the next iteration. All vbs are initialized to the
1467 		 * first buffer and we shift buffers every iteration, so for
1468 		 * the first two iterations no buffer will be dequeued.
1469 		 * This ensures that the driver keeps the (n-2)th, (n-1)th and
1470 		 * (n)th fields when deinterlacing is enabled
1471 		 */
1472 		if (ctx->src_vbs[2] != ctx->src_vbs[1])
1473 			s_vb = ctx->src_vbs[2];
1474 		else
1475 			s_vb = NULL;
1476 	}
1477 
1478 	spin_lock_irqsave(&dev->lock, flags);
1479 
1480 	if (s_vb)
1481 		v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
1482 
1483 	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
1484 
1485 	spin_unlock_irqrestore(&dev->lock, flags);
1486 
1487 	if (ctx->deinterlacing) {
1488 		ctx->src_vbs[2] = ctx->src_vbs[1];
1489 		ctx->src_vbs[1] = ctx->src_vbs[0];
1490 	}
1491 
1492 	/*
1493 	 * Since vb2_buf_done has already been called for these
1494 	 * buffers, we can now NULL them out so that we won't try
1495 	 * to clean out stray pointers later on.
1496 	 */
1497 	ctx->src_vbs[0] = NULL;
1498 	ctx->dst_vb = NULL;
1499 
1500 	if (ctx->aborting)
1501 		goto finished;
1502 
1503 	ctx->bufs_completed++;
1504 	if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
1505 		device_run(ctx);
1506 		goto handled;
1507 	}
1508 
1509 finished:
1510 	vpe_dbg(ctx->dev, "finishing transaction\n");
1511 	ctx->bufs_completed = 0;
1512 	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
1513 handled:
1514 	return IRQ_HANDLED;
1515 }
1516 
1517 /*
1518  * video ioctls
1519  */
1520 static int vpe_querycap(struct file *file, void *priv,
1521 			struct v4l2_capability *cap)
1522 {
1523 	strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver));
1524 	strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
1525 	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1526 		VPE_MODULE_NAME);
1527 	return 0;
1528 }
1529 
1530 static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1531 {
1532 	int i, index;
1533 	struct vpe_fmt *fmt = NULL;
1534 
1535 	index = 0;
1536 	for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
1537 		if (vpe_formats[i].types & type) {
1538 			if (index == f->index) {
1539 				fmt = &vpe_formats[i];
1540 				break;
1541 			}
1542 			index++;
1543 		}
1544 	}
1545 
1546 	if (!fmt)
1547 		return -EINVAL;
1548 
1549 	f->pixelformat = fmt->fourcc;
1550 	return 0;
1551 }
1552 
1553 static int vpe_enum_fmt(struct file *file, void *priv,
1554 				struct v4l2_fmtdesc *f)
1555 {
1556 	if (V4L2_TYPE_IS_OUTPUT(f->type))
1557 		return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
1558 
1559 	return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
1560 }
1561 
1562 static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1563 {
1564 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1565 	struct vpe_ctx *ctx = file->private_data;
1566 	struct vb2_queue *vq;
1567 	struct vpe_q_data *q_data;
1568 
1569 	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1570 	if (!vq)
1571 		return -EINVAL;
1572 
1573 	q_data = get_q_data(ctx, f->type);
1574 	if (!q_data)
1575 		return -EINVAL;
1576 
1577 	*f = q_data->format;
1578 
1579 	if (V4L2_TYPE_IS_CAPTURE(f->type)) {
1580 		struct vpe_q_data *s_q_data;
1581 		struct v4l2_pix_format_mplane *spix;
1582 
1583 		/* get colorimetry from the source queue */
1584 		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
1585 		spix = &s_q_data->format.fmt.pix_mp;
1586 
1587 		pix->colorspace = spix->colorspace;
1588 		pix->xfer_func = spix->xfer_func;
1589 		pix->ycbcr_enc = spix->ycbcr_enc;
1590 		pix->quantization = spix->quantization;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
1597 		       struct vpe_fmt *fmt, int type)
1598 {
1599 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1600 	struct v4l2_plane_pix_format *plane_fmt;
1601 	unsigned int w_align;
1602 	int i, depth, depth_bytes, height;
1603 	unsigned int stride = 0;
1604 	const struct v4l2_format_info *finfo;
1605 
1606 	if (!fmt || !(fmt->types & type)) {
1607 		vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
1608 			pix->pixelformat);
1609 		fmt = __find_format(V4L2_PIX_FMT_YUYV);
1610 	}
1611 
1612 	if (pix->field != V4L2_FIELD_NONE &&
1613 	    pix->field != V4L2_FIELD_ALTERNATE &&
1614 	    pix->field != V4L2_FIELD_SEQ_TB &&
1615 	    pix->field != V4L2_FIELD_SEQ_BT)
1616 		pix->field = V4L2_FIELD_NONE;
1617 
1618 	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
1619 
1620 	/*
1621 	 * the line stride should be 16 byte aligned for VPDMA to work; based
1622 	 * on the bytes per pixel, figure out how much the width should be
1623 	 * aligned to make sure the line stride is 16 byte aligned
1624 	 */
1625 	depth_bytes = depth >> 3;
1626 
1627 	if (depth_bytes == 3) {
1628 		/*
1629 		 * if bpp is 3 (as in some RGB formats), the pixel width doesn't
1630 		 * really help in ensuring line stride is 16 byte aligned
1631 		 */
1632 		w_align = 4;
1633 	} else {
1634 		/*
1635 		 * for the remaining bpp values (4, 2 and 1), the pixel width alignment
1636 		 * can ensure a line stride alignment of 16 bytes. For example,
1637 		 * if bpp is 2, then the line stride can be 16 byte aligned if
1638 		 * the width is 8 byte aligned
1639 		 */
1640 
1641 		/*
1642 		 * HACK: using order_base_2() here causes lots of asm output
1643 		 * errors with smatch, on i386:
1644 		 * ./arch/x86/include/asm/bitops.h:457:22:
1645 		 *		 warning: asm output is not an lvalue
1646 		 * Perhaps some gcc optimization is doing the wrong thing
1647 		 * there.
1648 		 * Let's get rid of them by doing the calculation in two steps
1649 		 */
1650 		w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
1651 		w_align = ilog2(w_align);
1652 	}
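	/*
	 * Example of the two-step calculation above, assuming
	 * VPDMA_DESC_ALIGN is 16 bytes: for a 2-byte-per-pixel format,
	 * roundup_pow_of_two(16 / 2) = 8 and ilog2(8) = 3, so the width is
	 * aligned to 2^3 = 8 pixels, keeping the line stride a multiple of
	 * 16 bytes.
	 */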
1653 
1654 	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
1655 			      &pix->height, MIN_H, MAX_H, H_ALIGN,
1656 			      S_ALIGN);
1657 
1658 	if (!pix->num_planes || pix->num_planes > 2)
1659 		pix->num_planes = fmt->coplanar ? 2 : 1;
1660 	else if (pix->num_planes > 1 && !fmt->coplanar)
1661 		pix->num_planes = 1;
1662 
1663 	pix->pixelformat = fmt->fourcc;
1664 	finfo = v4l2_format_info(fmt->fourcc);
1665 
1666 	/*
1667 	 * For the actual image parameters, we need to consider the field
1668 	 * height of the image for SEQ_XX buffers.
1669 	 */
1670 	if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
1671 		height = pix->height / 2;
1672 	else
1673 		height = pix->height;
1674 
1675 	if (!pix->colorspace) {
1676 		if (v4l2_is_format_rgb(finfo)) {
1677 			pix->colorspace = V4L2_COLORSPACE_SRGB;
1678 		} else {
1679 			if (height > 1280)	/* HD */
1680 				pix->colorspace = V4L2_COLORSPACE_REC709;
1681 			else			/* SD */
1682 				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1683 		}
1684 	}
1685 
1686 	memset(pix->reserved, 0, sizeof(pix->reserved));
1687 	for (i = 0; i < pix->num_planes; i++) {
1688 		plane_fmt = &pix->plane_fmt[i];
1689 		depth = fmt->vpdma_fmt[i]->depth;
1690 
1691 		stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
1692 		if (stride > plane_fmt->bytesperline)
1693 			plane_fmt->bytesperline = stride;
1694 
1695 		plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
1696 						  stride,
1697 						  VPDMA_MAX_STRIDE);
1698 
1699 		plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
1700 						VPDMA_STRIDE_ALIGN);
1701 
1702 		if (i == VPE_LUMA) {
1703 			plane_fmt->sizeimage = pix->height *
1704 					       plane_fmt->bytesperline;
1705 
1706 			if (pix->num_planes == 1 && fmt->coplanar)
1707 				plane_fmt->sizeimage += pix->height *
1708 					plane_fmt->bytesperline *
1709 					fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3;
1710 
1711 		} else { /* i == VPE_CHROMA */
1712 			plane_fmt->sizeimage = (pix->height *
1713 					       plane_fmt->bytesperline *
1714 					       depth) >> 3;
1715 		}
1716 		memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
1717 	}
1718 
1719 	return 0;
1720 }
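/*
 * sizeimage example for the loop above: a single-plane 1920x1080 NV12
 * image with bytesperline = 1920 gets 1920 * 1080 luma bytes plus
 * (1920 * 1080 * 4) >> 3 chroma bytes (assuming the C420 vpdma format is
 * 4 bits deep), i.e. 3110400 bytes total -- the usual 1.5 bytes per pixel.
 */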
1721 
1722 static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1723 {
1724 	struct vpe_ctx *ctx = file->private_data;
1725 	struct vpe_fmt *fmt = find_format(f);
1726 
1727 	if (V4L2_TYPE_IS_OUTPUT(f->type))
1728 		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1729 	else
1730 		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1731 }
1732 
1733 static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
1734 {
1735 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1736 	struct v4l2_pix_format_mplane *qpix;
1737 	struct vpe_q_data *q_data;
1738 	struct vb2_queue *vq;
1739 
1740 	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1741 	if (!vq)
1742 		return -EINVAL;
1743 
1744 	if (vb2_is_busy(vq)) {
1745 		vpe_err(ctx->dev, "queue busy\n");
1746 		return -EBUSY;
1747 	}
1748 
1749 	q_data = get_q_data(ctx, f->type);
1750 	if (!q_data)
1751 		return -EINVAL;
1752 
1753 	qpix = &q_data->format.fmt.pix_mp;
1754 	q_data->fmt		= find_format(f);
1755 	q_data->format = *f;
1756 
1757 	q_data->c_rect.left	= 0;
1758 	q_data->c_rect.top	= 0;
1759 	q_data->c_rect.width	= pix->width;
1760 	q_data->c_rect.height	= pix->height;
1761 
1762 	if (qpix->field == V4L2_FIELD_ALTERNATE)
1763 		q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
1764 	else if (qpix->field == V4L2_FIELD_SEQ_TB)
1765 		q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
1766 	else if (qpix->field == V4L2_FIELD_SEQ_BT)
1767 		q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
1768 	else
1769 		q_data->flags &= ~Q_IS_INTERLACED;
1770 
1771 	/* the crop height is halved for the case of SEQ_XX buffers */
1772 	if (q_data->flags & Q_IS_SEQ_XX)
1773 		q_data->c_rect.height /= 2;
1774 
1775 	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
1776 		f->type, pix->width, pix->height, pix->pixelformat,
1777 		pix->plane_fmt[0].bytesperline);
1778 	if (pix->num_planes == 2)
1779 		vpe_dbg(ctx->dev, " bpl_uv %d\n",
1780 			pix->plane_fmt[1].bytesperline);
1781 
1782 	return 0;
1783 }
1784 
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	int ret;
	struct vpe_ctx *ctx = file->private_data;

	ret = vpe_try_fmt(file, priv, f);
	if (ret)
		return ret;

	ret = __vpe_s_fmt(ctx, f);
	if (ret)
		return ret;

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		set_src_registers(ctx);
	else
		set_dst_registers(ctx);

	return set_srcdst_params(ctx);
}

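/*
 * Minimal sketch of the userspace side of this negotiation (illustrative
 * values, error handling omitted): TRY_FMT probes what the driver would
 * adjust, S_FMT then applies the format and programs the shadow registers:
 *
 *	struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };
 *
 *	f.fmt.pix_mp.width = 1280;
 *	f.fmt.pix_mp.height = 720;
 *	f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUYV;
 *	f.fmt.pix_mp.field = V4L2_FIELD_NONE;
 *	ioctl(fd, VIDIOC_TRY_FMT, &f);	// driver clamps/aligns the request
 *	ioctl(fd, VIDIOC_S_FMT, &f);	// applies it and updates registers
 */
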
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
	struct vpe_q_data *q_data;
	struct v4l2_pix_format_mplane *pix;
	int height;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	q_data = get_q_data(ctx, s->type);
	if (!q_data)
		return -EINVAL;

	pix = &q_data->format.fmt.pix_mp;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE:
		/*
		 * COMPOSE target is only valid for capture buffer type, return
		 * error for output buffer type
		 */
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_CROP:
		/*
		 * CROP target is only valid for output buffer type, return
		 * error for capture buffer type
		 */
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	/*
	 * bounds and default crop/compose targets are invalid targets to
	 * try/set
	 */
	default:
		return -EINVAL;
	}

	/*
	 * For SEQ_XX buffers, the crop height must be bounded by the field
	 * height, not the full buffer height
	 */
	if (q_data->flags & Q_IS_SEQ_XX)
		height = pix->height / 2;
	else
		height = pix->height;

	if (s->r.top < 0 || s->r.left < 0) {
		vpe_err(ctx->dev, "negative values for top and left\n");
		s->r.top = s->r.left = 0;
	}

	v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
		&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);

	/* adjust left/top if cropping rectangle is out of bounds */
	if (s->r.left + s->r.width > pix->width)
		s->r.left = pix->width - s->r.width;
	if (s->r.top + s->r.height > pix->height)
		s->r.top = pix->height - s->r.height;

	return 0;
}

static int vpe_g_selection(struct file *file, void *fh,
		struct v4l2_selection *s)
{
	struct vpe_ctx *ctx = file->private_data;
	struct vpe_q_data *q_data;
	struct v4l2_pix_format_mplane *pix;
	bool use_c_rect = false;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	q_data = get_q_data(ctx, s->type);
	if (!q_data)
		return -EINVAL;

	pix = &q_data->format.fmt.pix_mp;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		use_c_rect = true;
		break;
	case V4L2_SEL_TGT_CROP:
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		use_c_rect = true;
		break;
	default:
		return -EINVAL;
	}

	if (use_c_rect) {
		/*
		 * for CROP/COMPOSE target type, return c_rect params from the
		 * respective buffer type
		 */
		s->r = q_data->c_rect;
	} else {
		/*
		 * for DEFAULT/BOUNDS target type, return width and height from
		 * S_FMT of the respective buffer type
		 */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = pix->width;
		s->r.height = pix->height;
	}

	return 0;
}

static int vpe_s_selection(struct file *file, void *fh,
		struct v4l2_selection *s)
{
	struct vpe_ctx *ctx = file->private_data;
	struct vpe_q_data *q_data;
	struct v4l2_selection sel = *s;
	int ret;

	ret = __vpe_try_selection(ctx, &sel);
	if (ret)
		return ret;

	q_data = get_q_data(ctx, sel.type);
	if (!q_data)
		return -EINVAL;

	if ((q_data->c_rect.left == sel.r.left) &&
			(q_data->c_rect.top == sel.r.top) &&
			(q_data->c_rect.width == sel.r.width) &&
			(q_data->c_rect.height == sel.r.height)) {
		vpe_dbg(ctx->dev,
			"requested crop/compose values are already set\n");
		return 0;
	}

	q_data->c_rect = sel.r;

	return set_srcdst_params(ctx);
}

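/*
 * Sketch of a typical crop request on the OUTPUT (source) side
 * (illustrative values; __vpe_try_selection() may shrink or shift the
 * rectangle to keep it inside the source frame):
 *
 *	struct v4l2_selection sel = {
 *		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
 *		.target = V4L2_SEL_TGT_CROP,
 *		.r = { .left = 0, .top = 0, .width = 720, .height = 480 },
 *	};
 *	ioctl(fd, VIDIOC_S_SELECTION, &sel);	// sel.r returns adjusted rect
 */
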
/*
 * Defines the number of buffers/frames a context can process with VPE before
 * switching to a different context. The default is one buffer per job.
 */
#define V4L2_CID_VPE_BUFS_PER_JOB		(V4L2_CID_USER_TI_VPE_BASE + 0)

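/*
 * Userspace can raise this via the standard control interface, e.g. to
 * amortize job-switch overhead over four buffers (value is illustrative):
 *
 *	struct v4l2_control ctrl = {
 *		.id = V4L2_CID_USER_TI_VPE_BASE + 0,	// V4L2_CID_VPE_BUFS_PER_JOB
 *		.value = 4,
 *	};
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */
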
static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpe_ctx *ctx =
		container_of(ctrl->handler, struct vpe_ctx, hdl);

	switch (ctrl->id) {
	case V4L2_CID_VPE_BUFS_PER_JOB:
		ctx->bufs_per_job = ctrl->val;
		break;

	default:
		vpe_err(ctx->dev, "Invalid control\n");
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};

static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap		= vpe_querycap,

	.vidioc_enum_fmt_vid_cap	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_g_selection		= vpe_g_selection,
	.vidioc_s_selection		= vpe_s_selection,

	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,

	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/*
 * Queue operations
 */
static int vpe_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	int i;
	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
	struct vpe_q_data *q_data;
	struct v4l2_pix_format_mplane *pix;

	q_data = get_q_data(ctx, vq->type);
	if (!q_data)
		return -EINVAL;

	pix = &q_data->format.fmt.pix_mp;
	*nplanes = pix->num_planes;

	for (i = 0; i < *nplanes; i++)
		sizes[i] = pix->plane_fmt[i].sizeimage;

	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
		sizes[VPE_LUMA]);
	if (*nplanes == 2)
		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);

	return 0;
}

static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	struct v4l2_pix_format_mplane *pix;
	int i;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	if (!q_data)
		return -EINVAL;

	pix = &q_data->format.fmt.pix_mp;

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		if (!(q_data->flags & Q_IS_INTERLACED)) {
			vbuf->field = V4L2_FIELD_NONE;
		} else {
			if (vbuf->field != V4L2_FIELD_TOP &&
			    vbuf->field != V4L2_FIELD_BOTTOM &&
			    vbuf->field != V4L2_FIELD_SEQ_TB &&
			    vbuf->field != V4L2_FIELD_SEQ_BT)
				return -EINVAL;
		}
	}

	for (i = 0; i < pix->num_planes; i++) {
		if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(unsigned long)pix->plane_fmt[i].sizeimage);
			return -EINVAL;
		}
	}

	for (i = 0; i < pix->num_planes; i++)
		vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);

	return 0;
}

static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int check_srcdst_sizes(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;

	if (src_w == dst_w && src_h == dst_h)
		return 0;

	if (src_h <= SC_MAX_PIXEL_HEIGHT &&
	    src_w <= SC_MAX_PIXEL_WIDTH &&
	    dst_h <= SC_MAX_PIXEL_HEIGHT &&
	    dst_w <= SC_MAX_PIXEL_WIDTH)
		return 0;

	return -1;
}

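/*
 * In other words, a pure pass-through (identical source and destination
 * rectangles) is always allowed, while any scaling job needs both
 * rectangles to fit within the scaler limits. Assuming the 2047-pixel
 * SC_MAX_PIXEL_WIDTH/HEIGHT limits from sc.h, a 1920x1080 to 1280x720
 * downscale is accepted, while scaling to or from anything wider than
 * that is rejected.
 */
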
static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vb)
			break;
		spin_lock_irqsave(&ctx->dev->lock, flags);
		v4l2_m2m_buf_done(vb, state);
		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	}

	/*
	 * Cleanup the in-transit vb2 buffers that have been
	 * removed from their respective queue already but for
	 * which processing has not been completed yet.
	 */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		spin_lock_irqsave(&ctx->dev->lock, flags);

		if (ctx->src_vbs[2])
			v4l2_m2m_buf_done(ctx->src_vbs[2], state);

		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[1], state);

		if (ctx->src_vbs[0] &&
		    (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
		    (ctx->src_vbs[0] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[0], state);

		ctx->src_vbs[2] = NULL;
		ctx->src_vbs[1] = NULL;
		ctx->src_vbs[0] = NULL;

		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	} else {
		if (ctx->dst_vb) {
			spin_lock_irqsave(&ctx->dev->lock, flags);

			v4l2_m2m_buf_done(ctx->dst_vb, state);
			ctx->dst_vb = NULL;
			spin_unlock_irqrestore(&ctx->dev->lock, flags);
		}
	}
}

static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	/* check whether any of the sizes exceed the maximum scaling limits */
	if (check_srcdst_sizes(ctx)) {
		vpe_err(ctx->dev,
			"Conversion setup failed, check source and destination parameters\n");
		vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
		return -EINVAL;
	}

	if (ctx->deinterlacing)
		config_edi_input_mode(ctx, 0x0);

	if (ctx->sequence != 0)
		set_srcdst_params(ctx);

	return 0;
}

static void vpe_stop_streaming(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.start_streaming = vpe_start_streaming,
	.stop_streaming  = vpe_stop_streaming,
};

static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &vpe_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &dev->dev_mutex;
	src_vq->dev = dev->v4l2_dev.dev;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &vpe_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &dev->dev_mutex;
	dst_vq->dev = dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

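/*
 * Note on the queue convention above: in the mem2mem model, the OUTPUT
 * queue carries source buffers that userspace fills for VPE to read, while
 * the CAPTURE queue carries destination buffers that VPE writes into. Both
 * are backed by vb2_dma_contig_memops, i.e. physically contiguous DMA
 * buffers.
 */
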
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};

/*
 * File operations
 */
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	struct vpe_ctx *ctx;
	struct v4l2_pix_format_mplane *pix;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_mmr_adb;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_sc_h;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = ctx;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	s_q_data = &ctx->q_data[Q_DATA_SRC];
	pix = &s_q_data->format.fmt.pix_mp;
	s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
	pix->pixelformat = s_q_data->fmt->fourcc;
	s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	pix->width = 1920;
	pix->height = 1080;
	pix->num_planes = 1;
	pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	pix->plane_fmt[VPE_LUMA].sizeimage =
			pix->plane_fmt[VPE_LUMA].bytesperline *
			pix->height;
	pix->colorspace = V4L2_COLORSPACE_REC709;
	pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
	pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	pix->field = V4L2_FIELD_NONE;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = pix->width;
	s_q_data->c_rect.height = pix->height;
	s_q_data->flags = 0;

	ctx->q_data[Q_DATA_DST] = *s_q_data;
	ctx->q_data[Q_DATA_DST].format.type =
			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

	set_dei_shadow_registers(ctx);
	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * For now, just report the creation of the first instance; we can
	 * later optimize the driver to enable or disable clocks when the
	 * first instance is created or the last instance is released.
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->fh.m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->sc_coeff_v);
free_sc_h:
	vpdma_free_desc_buf(&ctx->sc_coeff_h);
free_mmr_adb:
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}

static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file->private_data;

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	free_mv_buffers(ctx);

	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	vpdma_free_desc_buf(&ctx->sc_coeff_v);
	vpdma_free_desc_buf(&ctx->sc_coeff_h);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);

	kfree(ctx);

	/*
	 * For now, just report the release of the last instance; we can
	 * later optimize the driver to enable or disable clocks when the
	 * first instance is created or the last instance is released.
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}

static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};

static const struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
};

static const struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
};

static int vpe_runtime_get(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_get\n");

	r = pm_runtime_get_sync(&pdev->dev);
	WARN_ON(r < 0);
	/* drop the reference taken above only on failure */
	if (r < 0)
		pm_runtime_put_noidle(&pdev->dev);
	return r < 0 ? r : 0;
}

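/*
 * Note: pm_runtime_get_sync() increments the device usage count even when
 * it fails, so the pm_runtime_put_noidle() above is what keeps the count
 * balanced on the error path; vpe_runtime_put() drops the reference taken
 * on success.
 */
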
static void vpe_runtime_put(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_put\n");

	r = pm_runtime_put_sync(&pdev->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}

static void vpe_fw_cb(struct platform_device *pdev)
{
	struct vpe_dev *dev = platform_get_drvdata(pdev);
	struct video_device *vfd;
	int ret;

	vfd = &dev->vfd;
	*vfd = vpe_videodev;
	vfd->lock = &dev->dev_mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
	if (ret) {
		vpe_err(dev, "Failed to register video device\n");

		vpe_set_clock_enable(dev, 0);
		vpe_runtime_put(pdev);
		pm_runtime_disable(&pdev->dev);
		v4l2_m2m_release(dev->m2m_dev);
		v4l2_device_unregister(&dev->v4l2_dev);

		return;
	}

	video_set_drvdata(vfd, dev);
	dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
		vfd->num);
}

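/*
 * vpdma_create() below invokes vpe_fw_cb() once the VPDMA firmware has been
 * loaded, so the video device only becomes visible to userspace when the
 * DMA engine is actually ready to run jobs.
 */
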
static int vpe_probe(struct platform_device *pdev)
{
	struct vpe_dev *dev;
	int ret, irq, func;

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev,
			"32-bit consistent DMA enable failed\n");
		return ret;
	}

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		return ret;

	atomic_set(&dev->num_instances, 0);
	mutex_init(&dev->dev_mutex);

	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"vpe_top");
	if (!dev->res) {
		dev_err(&pdev->dev, "missing 'vpe_top' resources data\n");
		ret = -ENODEV;
		goto v4l2_dev_unreg;
	}

	/*
	 * HACK: we get resource info from the device tree in the form of a
	 * list of VPE sub-blocks. The driver currently uses only the base of
	 * vpe_top for register access; it should be changed later to access
	 * registers based on the sub-block base addresses.
	 */
	dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
	if (!dev->base) {
		ret = -ENOMEM;
		goto v4l2_dev_unreg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto v4l2_dev_unreg;
	}

	ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
			dev);
	if (ret)
		goto v4l2_dev_unreg;

	platform_set_drvdata(pdev, dev);

	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		vpe_err(dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto v4l2_dev_unreg;
	}

	pm_runtime_enable(&pdev->dev);

	ret = vpe_runtime_get(pdev);
	if (ret)
		goto rel_m2m;

	/* Perform clk enable followed by reset */
	vpe_set_clock_enable(dev, 1);

	vpe_top_reset(dev);

	func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
		VPE_PID_FUNC_SHIFT);
	vpe_dbg(dev, "VPE PID function %x\n", func);

	vpe_top_vpdma_reset(dev);

	dev->sc = sc_create(pdev, "sc");
	if (IS_ERR(dev->sc)) {
		ret = PTR_ERR(dev->sc);
		goto runtime_put;
	}

	dev->csc = csc_create(pdev, "csc");
	if (IS_ERR(dev->csc)) {
		ret = PTR_ERR(dev->csc);
		goto runtime_put;
	}

	dev->vpdma = &dev->vpdma_data;
	ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
	if (ret)
		goto runtime_put;

	return 0;

runtime_put:
	vpe_runtime_put(pdev);
rel_m2m:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(dev->m2m_dev);
v4l2_dev_unreg:
	v4l2_device_unregister(&dev->v4l2_dev);

	return ret;
}

static int vpe_remove(struct platform_device *pdev)
{
	struct vpe_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME "\n");

	v4l2_m2m_release(dev->m2m_dev);
	video_unregister_device(&dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);

	vpe_set_clock_enable(dev, 0);
	vpe_runtime_put(pdev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,dra7-vpe",
	},
	{},
};
MODULE_DEVICE_TABLE(of, vpe_of_match);
#endif

static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.of_match_table = of_match_ptr(vpe_of_match),
	},
};

module_platform_driver(vpe_pdrv);

MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");