1 /*
2  * vivid-vid-cap.c - video capture support functions.
3  *
4  * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5  *
6  * This program is free software; you may redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17  * SOFTWARE.
18  */
19 
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/vmalloc.h>
24 #include <linux/videodev2.h>
25 #include <linux/v4l2-dv-timings.h>
26 #include <media/v4l2-common.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-dv-timings.h>
29 
30 #include "vivid-core.h"
31 #include "vivid-vid-common.h"
32 #include "vivid-kthread-cap.h"
33 #include "vivid-vid-cap.h"
34 
35 /* timeperframe: min/max and default */
36 static const struct v4l2_fract
37 	tpf_min     = {.numerator = 1,		.denominator = FPS_MAX},
38 	tpf_max     = {.numerator = FPS_MAX,	.denominator = 1},
39 	tpf_default = {.numerator = 1,		.denominator = 30};
40 
41 static const struct vivid_fmt formats_ovl[] = {
42 	{
43 		.name     = "RGB565 (LE)",
44 		.fourcc   = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
45 		.depth    = 16,
46 		.planes   = 1,
47 	},
48 	{
49 		.name     = "XRGB555 (LE)",
50 		.fourcc   = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
51 		.depth    = 16,
52 		.planes   = 1,
53 	},
54 	{
55 		.name     = "ARGB555 (LE)",
56 		.fourcc   = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
57 		.depth    = 16,
58 		.planes   = 1,
59 	},
60 };
61 
62 /* The number of discrete webcam framesizes */
63 #define VIVID_WEBCAM_SIZES 3
64 /* The number of discrete webcam frameintervals */
65 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
66 
67 /* Sizes must be in increasing order */
68 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
69 	{  320, 180 },
70 	{  640, 360 },
71 	{ 1280, 720 },
72 };
73 
74 /*
75  * Intervals must be in increasing order and there must be twice as many
76  * elements in this array as there are in webcam_sizes.
77  */
78 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
79 	{  1, 10 },
80 	{  1, 15 },
81 	{  1, 25 },
82 	{  1, 30 },
83 	{  1, 50 },
84 	{  1, 60 },
85 };
86 
87 static const struct v4l2_discrete_probe webcam_probe = {
88 	webcam_sizes,
89 	VIVID_WEBCAM_SIZES
90 };
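/*
 * Illustrative note (derived from the arrays above, not part of the
 * original comments): each webcam size index i is paired with the first
 * 2 * (VIVID_WEBCAM_SIZES - i) entries of webcam_intervals, so larger
 * frames offer fewer and slower rates:
 *
 *   i = 0 ( 320x180) -> intervals[0..5] (10 to 60 fps)
 *   i = 1 ( 640x360) -> intervals[0..3] (10 to 30 fps)
 *   i = 2 (1280x720) -> intervals[0..1] (10 and 15 fps)
 *
 * This is the "2 * (3 - i)" bound used in vivid_s_fmt_vid_cap(),
 * vidioc_enum_frameintervals() and vivid_vid_cap_s_parm() below.
 */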
91 
92 static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
93 		       unsigned *nbuffers, unsigned *nplanes,
94 		       unsigned sizes[], void *alloc_ctxs[])
95 {
96 	struct vivid_dev *dev = vb2_get_drv_priv(vq);
97 	unsigned planes = tpg_g_planes(&dev->tpg);
98 	unsigned h = dev->fmt_cap_rect.height;
99 	unsigned p;
100 
101 	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
102 		/*
103 		 * You cannot use read() with FIELD_ALTERNATE since the field
104 		 * information (TOP/BOTTOM) cannot be passed back to the user.
105 		 */
106 		if (vb2_fileio_is_active(vq))
107 			return -EINVAL;
108 	}
109 
110 	if (dev->queue_setup_error) {
111 		/*
112 		 * Error injection: test what happens if queue_setup() returns
113 		 * an error.
114 		 */
115 		dev->queue_setup_error = false;
116 		return -EINVAL;
117 	}
118 	if (fmt) {
119 		const struct v4l2_pix_format_mplane *mp;
120 		struct v4l2_format mp_fmt;
121 		const struct vivid_fmt *vfmt;
122 
123 		if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) {
124 			fmt_sp2mp(fmt, &mp_fmt);
125 			fmt = &mp_fmt;
126 		}
127 		mp = &fmt->fmt.pix_mp;
128 		/*
129 		 * Check if the number of planes in the specified format matches
130 		 * the number of planes in the current format. You can't mix them.
131 		 */
132 		if (mp->num_planes != planes)
133 			return -EINVAL;
134 		vfmt = vivid_get_format(dev, mp->pixelformat);
135 		for (p = 0; p < planes; p++) {
136 			sizes[p] = mp->plane_fmt[p].sizeimage;
137 			if (sizes[0] < tpg_g_bytesperline(&dev->tpg, 0) * h +
138 							vfmt->data_offset[p])
139 				return -EINVAL;
140 		}
141 	} else {
142 		for (p = 0; p < planes; p++)
143 			sizes[p] = tpg_g_bytesperline(&dev->tpg, p) * h +
144 					dev->fmt_cap->data_offset[p];
145 	}
146 
147 	if (vq->num_buffers + *nbuffers < 2)
148 		*nbuffers = 2 - vq->num_buffers;
149 
150 	*nplanes = planes;
151 
152 	/*
153 	 * videobuf2-vmalloc allocator is context-less so no need to set
154 	 * alloc_ctxs array.
155 	 */
156 
157 	if (planes == 2)
158 		dprintk(dev, 1, "%s, count=%d, sizes=%u, %u\n", __func__,
159 			*nbuffers, sizes[0], sizes[1]);
160 	else
161 		dprintk(dev, 1, "%s, count=%d, size=%u\n", __func__,
162 			*nbuffers, sizes[0]);
163 
164 	return 0;
165 }
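/*
 * Sizing sketch (illustrative, assuming a single-plane 1280x720 YUYV
 * capture format, i.e. 2 bytes per pixel, so tpg_g_bytesperline() returns
 * 2560 and data_offset[0] is 0): the fmt == NULL branch above yields
 *
 *   sizes[0] = 2560 * 720 + 0 = 1843200 bytes per buffer
 *
 * and the *nbuffers adjustment guarantees the queue ends up with at
 * least two such buffers.
 */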
166 
167 static int vid_cap_buf_prepare(struct vb2_buffer *vb)
168 {
169 	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
170 	unsigned long size;
171 	unsigned planes = tpg_g_planes(&dev->tpg);
172 	unsigned p;
173 
174 	dprintk(dev, 1, "%s\n", __func__);
175 
176 	if (WARN_ON(NULL == dev->fmt_cap))
177 		return -EINVAL;
178 
179 	if (dev->buf_prepare_error) {
180 		/*
181 		 * Error injection: test what happens if buf_prepare() returns
182 		 * an error.
183 		 */
184 		dev->buf_prepare_error = false;
185 		return -EINVAL;
186 	}
187 	for (p = 0; p < planes; p++) {
188 		size = tpg_g_bytesperline(&dev->tpg, p) * dev->fmt_cap_rect.height +
189 			dev->fmt_cap->data_offset[p];
190 
191 		if (vb2_plane_size(vb, 0) < size) {
192 			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
193 					__func__, p, vb2_plane_size(vb, 0), size);
194 			return -EINVAL;
195 		}
196 
197 		vb2_set_plane_payload(vb, p, size);
198 		vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
199 	}
200 
201 	return 0;
202 }
203 
204 static void vid_cap_buf_finish(struct vb2_buffer *vb)
205 {
206 	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
207 	struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
208 	unsigned fps = 25;
209 	unsigned seq = vb->v4l2_buf.sequence;
210 
211 	if (!vivid_is_sdtv_cap(dev))
212 		return;
213 
214 	/*
215 	 * Set the timecode. Rarely used, so it is interesting to
216 	 * test this.
217 	 */
218 	vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
219 	if (dev->std_cap & V4L2_STD_525_60)
220 		fps = 30;
221 	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
222 	tc->flags = 0;
223 	tc->frames = seq % fps;
224 	tc->seconds = (seq / fps) % 60;
225 	tc->minutes = (seq / (60 * fps)) % 60;
226 	tc->hours = (seq / (60 * 60 * fps)) % 24;
227 }
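/*
 * Timecode sketch (illustrative numbers): with a 60 Hz standard
 * (fps = 30) and sequence number 123456 the code above produces
 *
 *   frames  = 123456 % 30            = 6
 *   seconds = (123456 / 30) % 60     = 35
 *   minutes = (123456 / 1800) % 60   = 8
 *   hours   = (123456 / 108000) % 24 = 1
 *
 * i.e. timecode 01:08:35:06, counted from the start of streaming.
 */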
228 
229 static void vid_cap_buf_queue(struct vb2_buffer *vb)
230 {
231 	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
232 	struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
233 
234 	dprintk(dev, 1, "%s\n", __func__);
235 
236 	spin_lock(&dev->slock);
237 	list_add_tail(&buf->list, &dev->vid_cap_active);
238 	spin_unlock(&dev->slock);
239 }
240 
241 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
242 {
243 	struct vivid_dev *dev = vb2_get_drv_priv(vq);
244 	unsigned i;
245 	int err;
246 
247 	if (vb2_is_streaming(&dev->vb_vid_out_q))
248 		dev->can_loop_video = vivid_vid_can_loop(dev);
249 
250 	if (dev->kthread_vid_cap)
251 		return 0;
252 
253 	dev->vid_cap_seq_count = 0;
254 	dprintk(dev, 1, "%s\n", __func__);
255 	for (i = 0; i < VIDEO_MAX_FRAME; i++)
256 		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
257 	if (dev->start_streaming_error) {
258 		dev->start_streaming_error = false;
259 		err = -EINVAL;
260 	} else {
261 		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
262 	}
263 	if (err) {
264 		struct vivid_buffer *buf, *tmp;
265 
266 		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
267 			list_del(&buf->list);
268 			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
269 		}
270 	}
271 	return err;
272 }
273 
274 /* abort streaming and wait for last buffer */
275 static void vid_cap_stop_streaming(struct vb2_queue *vq)
276 {
277 	struct vivid_dev *dev = vb2_get_drv_priv(vq);
278 
279 	dprintk(dev, 1, "%s\n", __func__);
280 	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
281 	dev->can_loop_video = false;
282 }
283 
284 const struct vb2_ops vivid_vid_cap_qops = {
285 	.queue_setup		= vid_cap_queue_setup,
286 	.buf_prepare		= vid_cap_buf_prepare,
287 	.buf_finish		= vid_cap_buf_finish,
288 	.buf_queue		= vid_cap_buf_queue,
289 	.start_streaming	= vid_cap_start_streaming,
290 	.stop_streaming		= vid_cap_stop_streaming,
291 	.wait_prepare		= vivid_unlock,
292 	.wait_finish		= vivid_lock,
293 };
294 
295 /*
296  * Determine the 'picture' quality based on the current TV frequency: either
297  * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
298  * signal or NOISE for no signal.
299  */
300 void vivid_update_quality(struct vivid_dev *dev)
301 {
302 	unsigned freq_modulus;
303 
304 	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
305 		/*
306 		 * The 'noise' will only be replaced by the actual video
307 		 * if the output video matches the input video settings.
308 		 */
309 		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
310 		return;
311 	}
312 	if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
313 		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
314 		return;
315 	}
316 	if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
317 		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
318 		return;
319 	}
320 	if (!vivid_is_tv_cap(dev)) {
321 		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
322 		return;
323 	}
324 
325 	/*
326 	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
327 	 * Within +/- 0.25 MHz of the channel there is color, and within
328 	 * +/- 1 MHz there is grayscale (chroma is lost).
329 	 * Everywhere else it is just noise.
330 	 */
331 	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
332 	if (freq_modulus > 2 * 16) {
333 		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
334 			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
335 		return;
336 	}
337 	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
338 		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
339 	else
340 		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
341 }
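/*
 * Worked example (assuming the usual V4L2 TV tuner unit of 62.5 kHz,
 * i.e. 16 steps per MHz, which is what the "* 16" factors above imply):
 *
 *   55.25 MHz -> tv_freq = 884, freq_modulus = (884 - 676) % 96 = 16
 *                -> within +/- 0.25 MHz of a channel -> COLOR
 *   56.00 MHz -> tv_freq = 896, freq_modulus = 28 -> GRAY
 *   52.00 MHz -> tv_freq = 832, freq_modulus = 60 -> NOISE
 */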
342 
343 /*
344  * Get the current picture quality and the associated afc value.
345  */
346 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
347 {
348 	unsigned freq_modulus;
349 
350 	if (afc)
351 		*afc = 0;
352 	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
353 	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
354 		return tpg_g_quality(&dev->tpg);
355 
356 	/*
357 	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
358 	 * Within +/- 0.25 MHz of the channel there is color, and within
359 	 * +/- 1 MHz there is grayscale (chroma is lost).
360 	 * Everywhere else it is just gray.
361 	 */
362 	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
363 	if (afc)
364 		*afc = freq_modulus - 1 * 16;
365 	return TPG_QUAL_GRAY;
366 }
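/*
 * AFC sketch (same 1/16 MHz unit assumption as above): tuned to
 * 55.00 MHz, freq_modulus = (880 - 676) % 96 = 12, so *afc = 12 - 16 = -4,
 * i.e. the tuner sits 4/16 = 0.25 MHz below the 55.25 MHz channel centre.
 */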
367 
368 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
369 {
370 	if (vivid_is_sdtv_cap(dev))
371 		return dev->std_aspect_ratio;
372 
373 	if (vivid_is_hdmi_cap(dev))
374 		return dev->dv_timings_aspect_ratio;
375 
376 	return TPG_VIDEO_ASPECT_IMAGE;
377 }
378 
379 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
380 {
381 	if (vivid_is_sdtv_cap(dev))
382 		return (dev->std_cap & V4L2_STD_525_60) ?
383 			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
384 
385 	if (vivid_is_hdmi_cap(dev) &&
386 	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
387 		return dev->src_rect.height == 480 ?
388 			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
389 
390 	return TPG_PIXEL_ASPECT_SQUARE;
391 }
392 
393 /*
394  * Called whenever the format has to be reset which can occur when
395  * changing inputs, standard, timings, etc.
396  */
397 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
398 {
399 	struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
400 	unsigned size;
401 
402 	switch (dev->input_type[dev->input]) {
403 	case WEBCAM:
404 	default:
405 		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
406 		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
407 		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
408 		dev->field_cap = V4L2_FIELD_NONE;
409 		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
410 		break;
411 	case TV:
412 	case SVID:
413 		dev->field_cap = dev->tv_field_cap;
414 		dev->src_rect.width = 720;
415 		if (dev->std_cap & V4L2_STD_525_60) {
416 			dev->src_rect.height = 480;
417 			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
418 			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
419 		} else {
420 			dev->src_rect.height = 576;
421 			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
422 			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
423 		}
424 		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
425 		break;
426 	case HDMI:
427 		dev->src_rect.width = bt->width;
428 		dev->src_rect.height = bt->height;
429 		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
430 		dev->timeperframe_vid_cap = (struct v4l2_fract) {
431 			size / 100, (u32)bt->pixelclock / 100
432 		};
433 		if (bt->interlaced)
434 			dev->field_cap = V4L2_FIELD_ALTERNATE;
435 		else
436 			dev->field_cap = V4L2_FIELD_NONE;
437 
438 		/*
439 		 * We can be called from within s_ctrl, in which case we can't
440 		 * set/get controls. Luckily we don't need to in that case.
441 		 */
442 		if (keep_controls || !dev->colorspace)
443 			break;
444 		if (bt->standards & V4L2_DV_BT_STD_CEA861) {
445 			if (bt->width == 720 && bt->height <= 576)
446 				v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
447 			else
448 				v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
449 			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
450 		} else {
451 			v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
452 			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
453 		}
454 		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
455 		break;
456 	}
457 	vivid_update_quality(dev);
458 	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
459 	dev->crop_cap = dev->src_rect;
460 	dev->crop_bounds_cap = dev->src_rect;
461 	dev->compose_cap = dev->crop_cap;
462 	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
463 		dev->compose_cap.height /= 2;
464 	dev->fmt_cap_rect = dev->compose_cap;
465 	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
466 	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
467 	tpg_update_mv_step(&dev->tpg);
468 }
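/*
 * HDMI timeperframe sketch (using the standard CEA-861 1080p60 timing as
 * an assumed example): a total frame of 2200 x 1125 = 2475000 pixels and
 * a 148500000 Hz pixelclock give
 *
 *   timeperframe = { 2475000 / 100, 148500000 / 100 } = { 24750, 1485000 }
 *
 * which reduces to 1/60 s, i.e. 60 frames per second.
 */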
469 
470 /* Map the field to something that is valid for the current input */
471 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
472 {
473 	if (vivid_is_sdtv_cap(dev)) {
474 		switch (field) {
475 		case V4L2_FIELD_INTERLACED_TB:
476 		case V4L2_FIELD_INTERLACED_BT:
477 		case V4L2_FIELD_SEQ_TB:
478 		case V4L2_FIELD_SEQ_BT:
479 		case V4L2_FIELD_TOP:
480 		case V4L2_FIELD_BOTTOM:
481 		case V4L2_FIELD_ALTERNATE:
482 			return field;
483 		case V4L2_FIELD_INTERLACED:
484 		default:
485 			return V4L2_FIELD_INTERLACED;
486 		}
487 	}
488 	if (vivid_is_hdmi_cap(dev))
489 		return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
490 						       V4L2_FIELD_NONE;
491 	return V4L2_FIELD_NONE;
492 }
493 
494 static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
495 {
496 	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
497 		return tpg_g_colorspace(&dev->tpg);
498 	return dev->colorspace_out;
499 }
500 
501 int vivid_g_fmt_vid_cap(struct file *file, void *priv,
502 					struct v4l2_format *f)
503 {
504 	struct vivid_dev *dev = video_drvdata(file);
505 	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
506 	unsigned p;
507 
508 	mp->width        = dev->fmt_cap_rect.width;
509 	mp->height       = dev->fmt_cap_rect.height;
510 	mp->field        = dev->field_cap;
511 	mp->pixelformat  = dev->fmt_cap->fourcc;
512 	mp->colorspace   = vivid_colorspace_cap(dev);
513 	mp->num_planes = dev->fmt_cap->planes;
514 	for (p = 0; p < mp->num_planes; p++) {
515 		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
516 		mp->plane_fmt[p].sizeimage =
517 			mp->plane_fmt[p].bytesperline * mp->height +
518 			dev->fmt_cap->data_offset[p];
519 	}
520 	return 0;
521 }
522 
523 int vivid_try_fmt_vid_cap(struct file *file, void *priv,
524 			struct v4l2_format *f)
525 {
526 	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
527 	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
528 	struct vivid_dev *dev = video_drvdata(file);
529 	const struct vivid_fmt *fmt;
530 	unsigned bytesperline, max_bpl;
531 	unsigned factor = 1;
532 	unsigned w, h;
533 	unsigned p;
534 
535 	fmt = vivid_get_format(dev, mp->pixelformat);
536 	if (!fmt) {
537 		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
538 			mp->pixelformat);
539 		mp->pixelformat = V4L2_PIX_FMT_YUYV;
540 		fmt = vivid_get_format(dev, mp->pixelformat);
541 	}
542 
543 	mp->field = vivid_field_cap(dev, mp->field);
544 	if (vivid_is_webcam(dev)) {
545 		const struct v4l2_frmsize_discrete *sz =
546 			v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);
547 
548 		w = sz->width;
549 		h = sz->height;
550 	} else if (vivid_is_sdtv_cap(dev)) {
551 		w = 720;
552 		h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
553 	} else {
554 		w = dev->src_rect.width;
555 		h = dev->src_rect.height;
556 	}
557 	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
558 		factor = 2;
559 	if (vivid_is_webcam(dev) ||
560 	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
561 		mp->width = w;
562 		mp->height = h / factor;
563 	} else {
564 		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
565 
566 		rect_set_min_size(&r, &vivid_min_rect);
567 		rect_set_max_size(&r, &vivid_max_rect);
568 		if (dev->has_scaler_cap && !dev->has_compose_cap) {
569 			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
570 
571 			rect_set_max_size(&r, &max_r);
572 		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
573 			rect_set_max_size(&r, &dev->src_rect);
574 		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
575 			rect_set_min_size(&r, &dev->src_rect);
576 		}
577 		mp->width = r.width;
578 		mp->height = r.height / factor;
579 	}
580 
581 	/* This driver supports custom bytesperline values */
582 
583 	/* Calculate the minimum supported bytesperline value */
584 	bytesperline = (mp->width * fmt->depth) >> 3;
585 	/* Calculate the maximum supported bytesperline value */
586 	max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->depth) >> 3;
587 	mp->num_planes = fmt->planes;
588 	for (p = 0; p < mp->num_planes; p++) {
589 		if (pfmt[p].bytesperline > max_bpl)
590 			pfmt[p].bytesperline = max_bpl;
591 		if (pfmt[p].bytesperline < bytesperline)
592 			pfmt[p].bytesperline = bytesperline;
593 		pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height +
594 			fmt->data_offset[p];
595 		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
596 	}
597 	mp->colorspace = vivid_colorspace_cap(dev);
598 	memset(mp->reserved, 0, sizeof(mp->reserved));
599 	return 0;
600 }
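/*
 * bytesperline sketch (illustrative, single-plane 16 bpp format at a
 * 1280-pixel width): the minimum accepted value is 1280 * 16 / 8 = 2560;
 * anything smaller is raised to 2560, anything above max_bpl is clamped
 * down, and sizeimage is then recomputed as
 * bytesperline * height + data_offset, so a caller asking for padded
 * lines of, say, 4096 bytes gets sizeimage = 4096 * height + data_offset.
 */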
601 
602 int vivid_s_fmt_vid_cap(struct file *file, void *priv,
603 					struct v4l2_format *f)
604 {
605 	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
606 	struct vivid_dev *dev = video_drvdata(file);
607 	struct v4l2_rect *crop = &dev->crop_cap;
608 	struct v4l2_rect *compose = &dev->compose_cap;
609 	struct vb2_queue *q = &dev->vb_vid_cap_q;
610 	int ret = vivid_try_fmt_vid_cap(file, priv, f);
611 	unsigned factor = 1;
612 	unsigned i;
613 
614 	if (ret < 0)
615 		return ret;
616 
617 	if (vb2_is_busy(q)) {
618 		dprintk(dev, 1, "%s device busy\n", __func__);
619 		return -EBUSY;
620 	}
621 
622 	if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
623 		dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
624 		return -EBUSY;
625 	}
626 
627 	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
628 	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
629 		factor = 2;
630 
631 	/* Note: the webcam input doesn't support scaling, cropping or composing */
632 
633 	if (!vivid_is_webcam(dev) &&
634 	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
635 		struct v4l2_rect r = { 0, 0, mp->width, mp->height };
636 
637 		if (dev->has_scaler_cap) {
638 			if (dev->has_compose_cap)
639 				rect_map_inside(compose, &r);
640 			else
641 				*compose = r;
642 			if (dev->has_crop_cap && !dev->has_compose_cap) {
643 				struct v4l2_rect min_r = {
644 					0, 0,
645 					r.width / MAX_ZOOM,
646 					factor * r.height / MAX_ZOOM
647 				};
648 				struct v4l2_rect max_r = {
649 					0, 0,
650 					r.width * MAX_ZOOM,
651 					factor * r.height * MAX_ZOOM
652 				};
653 
654 				rect_set_min_size(crop, &min_r);
655 				rect_set_max_size(crop, &max_r);
656 				rect_map_inside(crop, &dev->crop_bounds_cap);
657 			} else if (dev->has_crop_cap) {
658 				struct v4l2_rect min_r = {
659 					0, 0,
660 					compose->width / MAX_ZOOM,
661 					factor * compose->height / MAX_ZOOM
662 				};
663 				struct v4l2_rect max_r = {
664 					0, 0,
665 					compose->width * MAX_ZOOM,
666 					factor * compose->height * MAX_ZOOM
667 				};
668 
669 				rect_set_min_size(crop, &min_r);
670 				rect_set_max_size(crop, &max_r);
671 				rect_map_inside(crop, &dev->crop_bounds_cap);
672 			}
673 		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
674 			r.height *= factor;
675 			rect_set_size_to(crop, &r);
676 			rect_map_inside(crop, &dev->crop_bounds_cap);
677 			r = *crop;
678 			r.height /= factor;
679 			rect_set_size_to(compose, &r);
680 		} else if (!dev->has_crop_cap) {
681 			rect_map_inside(compose, &r);
682 		} else {
683 			r.height *= factor;
684 			rect_set_max_size(crop, &r);
685 			rect_map_inside(crop, &dev->crop_bounds_cap);
686 			compose->top *= factor;
687 			compose->height *= factor;
688 			rect_set_size_to(compose, crop);
689 			rect_map_inside(compose, &r);
690 			compose->top /= factor;
691 			compose->height /= factor;
692 		}
693 	} else if (vivid_is_webcam(dev)) {
694 		/* Guaranteed to be a match */
695 		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
696 			if (webcam_sizes[i].width == mp->width &&
697 					webcam_sizes[i].height == mp->height)
698 				break;
699 		dev->webcam_size_idx = i;
700 		if (dev->webcam_ival_idx >= 2 * (3 - i))
701 			dev->webcam_ival_idx = 2 * (3 - i) - 1;
702 		vivid_update_format_cap(dev, false);
703 	} else {
704 		struct v4l2_rect r = { 0, 0, mp->width, mp->height };
705 
706 		rect_set_size_to(compose, &r);
707 		r.height *= factor;
708 		rect_set_size_to(crop, &r);
709 	}
710 
711 	dev->fmt_cap_rect.width = mp->width;
712 	dev->fmt_cap_rect.height = mp->height;
713 	tpg_s_buf_height(&dev->tpg, mp->height);
714 	tpg_s_bytesperline(&dev->tpg, 0, mp->plane_fmt[0].bytesperline);
715 	if (tpg_g_planes(&dev->tpg) > 1)
716 		tpg_s_bytesperline(&dev->tpg, 1, mp->plane_fmt[1].bytesperline);
717 	dev->field_cap = mp->field;
718 	tpg_s_field(&dev->tpg, dev->field_cap);
719 	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
720 	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
721 	if (vivid_is_sdtv_cap(dev))
722 		dev->tv_field_cap = mp->field;
723 	tpg_update_mv_step(&dev->tpg);
724 	return 0;
725 }
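/*
 * Webcam S_FMT sketch (illustrative): requesting 640x360 matches
 * webcam_sizes[1], so webcam_size_idx becomes 1 and only the first
 * 2 * (3 - 1) = 4 frame intervals remain valid; a previously selected
 * 50 or 60 fps interval (index 4 or 5) is therefore clamped back to
 * index 3, i.e. 30 fps, before vivid_update_format_cap() re-derives
 * the format.
 */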
726 
727 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
728 					struct v4l2_format *f)
729 {
730 	struct vivid_dev *dev = video_drvdata(file);
731 
732 	if (!dev->multiplanar)
733 		return -ENOTTY;
734 	return vivid_g_fmt_vid_cap(file, priv, f);
735 }
736 
737 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
738 			struct v4l2_format *f)
739 {
740 	struct vivid_dev *dev = video_drvdata(file);
741 
742 	if (!dev->multiplanar)
743 		return -ENOTTY;
744 	return vivid_try_fmt_vid_cap(file, priv, f);
745 }
746 
747 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
748 			struct v4l2_format *f)
749 {
750 	struct vivid_dev *dev = video_drvdata(file);
751 
752 	if (!dev->multiplanar)
753 		return -ENOTTY;
754 	return vivid_s_fmt_vid_cap(file, priv, f);
755 }
756 
757 int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
758 					struct v4l2_format *f)
759 {
760 	struct vivid_dev *dev = video_drvdata(file);
761 
762 	if (dev->multiplanar)
763 		return -ENOTTY;
764 	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
765 }
766 
767 int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
768 			struct v4l2_format *f)
769 {
770 	struct vivid_dev *dev = video_drvdata(file);
771 
772 	if (dev->multiplanar)
773 		return -ENOTTY;
774 	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
775 }
776 
777 int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
778 			struct v4l2_format *f)
779 {
780 	struct vivid_dev *dev = video_drvdata(file);
781 
782 	if (dev->multiplanar)
783 		return -ENOTTY;
784 	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
785 }
786 
787 int vivid_vid_cap_g_selection(struct file *file, void *priv,
788 			      struct v4l2_selection *sel)
789 {
790 	struct vivid_dev *dev = video_drvdata(file);
791 
792 	if (!dev->has_crop_cap && !dev->has_compose_cap)
793 		return -ENOTTY;
794 	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
795 		return -EINVAL;
796 	if (vivid_is_webcam(dev))
797 		return -EINVAL;
798 
799 	sel->r.left = sel->r.top = 0;
800 	switch (sel->target) {
801 	case V4L2_SEL_TGT_CROP:
802 		if (!dev->has_crop_cap)
803 			return -EINVAL;
804 		sel->r = dev->crop_cap;
805 		break;
806 	case V4L2_SEL_TGT_CROP_DEFAULT:
807 	case V4L2_SEL_TGT_CROP_BOUNDS:
808 		if (!dev->has_crop_cap)
809 			return -EINVAL;
810 		sel->r = dev->src_rect;
811 		break;
812 	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
813 		if (!dev->has_compose_cap)
814 			return -EINVAL;
815 		sel->r = vivid_max_rect;
816 		break;
817 	case V4L2_SEL_TGT_COMPOSE:
818 		if (!dev->has_compose_cap)
819 			return -EINVAL;
820 		sel->r = dev->compose_cap;
821 		break;
822 	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
823 		if (!dev->has_compose_cap)
824 			return -EINVAL;
825 		sel->r = dev->fmt_cap_rect;
826 		break;
827 	default:
828 		return -EINVAL;
829 	}
830 	return 0;
831 }
832 
833 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
834 {
835 	struct vivid_dev *dev = video_drvdata(file);
836 	struct v4l2_rect *crop = &dev->crop_cap;
837 	struct v4l2_rect *compose = &dev->compose_cap;
838 	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
839 	int ret;
840 
841 	if (!dev->has_crop_cap && !dev->has_compose_cap)
842 		return -ENOTTY;
843 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
844 		return -EINVAL;
845 	if (vivid_is_webcam(dev))
846 		return -EINVAL;
847 
848 	switch (s->target) {
849 	case V4L2_SEL_TGT_CROP:
850 		if (!dev->has_crop_cap)
851 			return -EINVAL;
852 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
853 		if (ret)
854 			return ret;
855 		rect_set_min_size(&s->r, &vivid_min_rect);
856 		rect_set_max_size(&s->r, &dev->src_rect);
857 		rect_map_inside(&s->r, &dev->crop_bounds_cap);
858 		s->r.top /= factor;
859 		s->r.height /= factor;
860 		if (dev->has_scaler_cap) {
861 			struct v4l2_rect fmt = dev->fmt_cap_rect;
862 			struct v4l2_rect max_rect = {
863 				0, 0,
864 				s->r.width * MAX_ZOOM,
865 				s->r.height * MAX_ZOOM
866 			};
867 			struct v4l2_rect min_rect = {
868 				0, 0,
869 				s->r.width / MAX_ZOOM,
870 				s->r.height / MAX_ZOOM
871 			};
872 
873 			rect_set_min_size(&fmt, &min_rect);
874 			if (!dev->has_compose_cap)
875 				rect_set_max_size(&fmt, &max_rect);
876 			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
877 			    vb2_is_busy(&dev->vb_vid_cap_q))
878 				return -EBUSY;
879 			if (dev->has_compose_cap) {
880 				rect_set_min_size(compose, &min_rect);
881 				rect_set_max_size(compose, &max_rect);
882 			}
883 			dev->fmt_cap_rect = fmt;
884 			tpg_s_buf_height(&dev->tpg, fmt.height);
885 		} else if (dev->has_compose_cap) {
886 			struct v4l2_rect fmt = dev->fmt_cap_rect;
887 
888 			rect_set_min_size(&fmt, &s->r);
889 			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
890 			    vb2_is_busy(&dev->vb_vid_cap_q))
891 				return -EBUSY;
892 			dev->fmt_cap_rect = fmt;
893 			tpg_s_buf_height(&dev->tpg, fmt.height);
894 			rect_set_size_to(compose, &s->r);
895 			rect_map_inside(compose, &dev->fmt_cap_rect);
896 		} else {
897 			if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
898 			    vb2_is_busy(&dev->vb_vid_cap_q))
899 				return -EBUSY;
900 			rect_set_size_to(&dev->fmt_cap_rect, &s->r);
901 			rect_set_size_to(compose, &s->r);
902 			rect_map_inside(compose, &dev->fmt_cap_rect);
903 			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
904 		}
905 		s->r.top *= factor;
906 		s->r.height *= factor;
907 		*crop = s->r;
908 		break;
909 	case V4L2_SEL_TGT_COMPOSE:
910 		if (!dev->has_compose_cap)
911 			return -EINVAL;
912 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
913 		if (ret)
914 			return ret;
915 		rect_set_min_size(&s->r, &vivid_min_rect);
916 		rect_set_max_size(&s->r, &dev->fmt_cap_rect);
917 		if (dev->has_scaler_cap) {
918 			struct v4l2_rect max_rect = {
919 				0, 0,
920 				dev->src_rect.width * MAX_ZOOM,
921 				(dev->src_rect.height / factor) * MAX_ZOOM
922 			};
923 
924 			rect_set_max_size(&s->r, &max_rect);
925 			if (dev->has_crop_cap) {
926 				struct v4l2_rect min_rect = {
927 					0, 0,
928 					s->r.width / MAX_ZOOM,
929 					(s->r.height * factor) / MAX_ZOOM
930 				};
931 				struct v4l2_rect max_rect = {
932 					0, 0,
933 					s->r.width * MAX_ZOOM,
934 					(s->r.height * factor) * MAX_ZOOM
935 				};
936 
937 				rect_set_min_size(crop, &min_rect);
938 				rect_set_max_size(crop, &max_rect);
939 				rect_map_inside(crop, &dev->crop_bounds_cap);
940 			}
941 		} else if (dev->has_crop_cap) {
942 			s->r.top *= factor;
943 			s->r.height *= factor;
944 			rect_set_max_size(&s->r, &dev->src_rect);
945 			rect_set_size_to(crop, &s->r);
946 			rect_map_inside(crop, &dev->crop_bounds_cap);
947 			s->r.top /= factor;
948 			s->r.height /= factor;
949 		} else {
950 			rect_set_size_to(&s->r, &dev->src_rect);
951 			s->r.height /= factor;
952 		}
953 		rect_map_inside(&s->r, &dev->fmt_cap_rect);
954 		if (dev->bitmap_cap && (compose->width != s->r.width ||
955 					compose->height != s->r.height)) {
956 			vfree(dev->bitmap_cap);
957 			dev->bitmap_cap = NULL;
958 		}
959 		*compose = s->r;
960 		break;
961 	default:
962 		return -EINVAL;
963 	}
964 
965 	tpg_s_crop_compose(&dev->tpg, crop, compose);
966 	return 0;
967 }
968 
969 int vivid_vid_cap_cropcap(struct file *file, void *priv,
970 			      struct v4l2_cropcap *cap)
971 {
972 	struct vivid_dev *dev = video_drvdata(file);
973 
974 	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
975 		return -EINVAL;
976 
977 	switch (vivid_get_pixel_aspect(dev)) {
978 	case TPG_PIXEL_ASPECT_NTSC:
979 		cap->pixelaspect.numerator = 11;
980 		cap->pixelaspect.denominator = 10;
981 		break;
982 	case TPG_PIXEL_ASPECT_PAL:
983 		cap->pixelaspect.numerator = 54;
984 		cap->pixelaspect.denominator = 59;
985 		break;
986 	case TPG_PIXEL_ASPECT_SQUARE:
987 		cap->pixelaspect.numerator = 1;
988 		cap->pixelaspect.denominator = 1;
989 		break;
990 	}
991 	return 0;
992 }
993 
994 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
995 					struct v4l2_fmtdesc *f)
996 {
997 	const struct vivid_fmt *fmt;
998 
999 	if (f->index >= ARRAY_SIZE(formats_ovl))
1000 		return -EINVAL;
1001 
1002 	fmt = &formats_ovl[f->index];
1003 
1004 	strlcpy(f->description, fmt->name, sizeof(f->description));
1005 	f->pixelformat = fmt->fourcc;
1006 	return 0;
1007 }
1008 
1009 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1010 					struct v4l2_format *f)
1011 {
1012 	struct vivid_dev *dev = video_drvdata(file);
1013 	const struct v4l2_rect *compose = &dev->compose_cap;
1014 	struct v4l2_window *win = &f->fmt.win;
1015 	unsigned clipcount = win->clipcount;
1016 
1017 	win->w.top = dev->overlay_cap_top;
1018 	win->w.left = dev->overlay_cap_left;
1019 	win->w.width = compose->width;
1020 	win->w.height = compose->height;
1021 	win->field = dev->overlay_cap_field;
1022 	win->clipcount = dev->clipcount_cap;
1023 	if (clipcount > dev->clipcount_cap)
1024 		clipcount = dev->clipcount_cap;
1025 	if (dev->bitmap_cap == NULL)
1026 		win->bitmap = NULL;
1027 	else if (win->bitmap) {
1028 		if (copy_to_user(win->bitmap, dev->bitmap_cap,
1029 		    ((compose->width + 7) / 8) * compose->height))
1030 			return -EFAULT;
1031 	}
1032 	if (clipcount && win->clips) {
1033 		if (copy_to_user(win->clips, dev->clips_cap,
1034 				 clipcount * sizeof(dev->clips_cap[0])))
1035 			return -EFAULT;
1036 	}
1037 	return 0;
1038 }
1039 
1040 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1041 					struct v4l2_format *f)
1042 {
1043 	struct vivid_dev *dev = video_drvdata(file);
1044 	const struct v4l2_rect *compose = &dev->compose_cap;
1045 	struct v4l2_window *win = &f->fmt.win;
1046 	int i, j;
1047 
1048 	win->w.left = clamp_t(int, win->w.left,
1049 			      -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1050 	win->w.top = clamp_t(int, win->w.top,
1051 			     -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1052 	win->w.width = compose->width;
1053 	win->w.height = compose->height;
1054 	if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1055 		win->field = V4L2_FIELD_ANY;
1056 	win->chromakey = 0;
1057 	win->global_alpha = 0;
1058 	if (win->clipcount && !win->clips)
1059 		win->clipcount = 0;
1060 	if (win->clipcount > MAX_CLIPS)
1061 		win->clipcount = MAX_CLIPS;
1062 	if (win->clipcount) {
1063 		if (copy_from_user(dev->try_clips_cap, win->clips,
1064 				   win->clipcount * sizeof(dev->clips_cap[0])))
1065 			return -EFAULT;
1066 		for (i = 0; i < win->clipcount; i++) {
1067 			struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1068 
1069 			r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1070 			r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1071 			r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1072 			r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1073 		}
1074 		/*
1075 		 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1076 		 * number and it's typically a one-time deal.
1077 		 */
1078 		for (i = 0; i < win->clipcount - 1; i++) {
1079 			struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1080 
1081 			for (j = i + 1; j < win->clipcount; j++) {
1082 				struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1083 
1084 				if (rect_overlap(r1, r2))
1085 					return -EINVAL;
1086 			}
1087 		}
1088 		if (copy_to_user(win->clips, dev->try_clips_cap,
1089 				 win->clipcount * sizeof(dev->clips_cap[0])))
1090 			return -EFAULT;
1091 	}
1092 	return 0;
1093 }
1094 
1095 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1096 					struct v4l2_format *f)
1097 {
1098 	struct vivid_dev *dev = video_drvdata(file);
1099 	const struct v4l2_rect *compose = &dev->compose_cap;
1100 	struct v4l2_window *win = &f->fmt.win;
1101 	int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1102 	unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
1103 	unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1104 	void *new_bitmap = NULL;
1105 
1106 	if (ret)
1107 		return ret;
1108 
1109 	if (win->bitmap) {
1110 		new_bitmap = vzalloc(bitmap_size);
1111 
1112 		if (new_bitmap == NULL)
1113 			return -ENOMEM;
1114 		if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1115 			vfree(new_bitmap);
1116 			return -EFAULT;
1117 		}
1118 	}
1119 
1120 	dev->overlay_cap_top = win->w.top;
1121 	dev->overlay_cap_left = win->w.left;
1122 	dev->overlay_cap_field = win->field;
1123 	vfree(dev->bitmap_cap);
1124 	dev->bitmap_cap = new_bitmap;
1125 	dev->clipcount_cap = win->clipcount;
1126 	if (dev->clipcount_cap)
1127 		memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1128 	return 0;
1129 }
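/*
 * Bitmap size sketch (illustrative): the clipping bitmap stores one bit
 * per pixel of the compose rectangle, with each row rounded up to a
 * whole byte. For a 720x576 compose rectangle that is
 *
 *   ((720 + 7) / 8) * 576 = 90 * 576 = 51840 bytes,
 *
 * which is the amount copied from userspace above and back to userspace
 * in vidioc_g_fmt_vid_overlay().
 */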
1130 
1131 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1132 {
1133 	struct vivid_dev *dev = video_drvdata(file);
1134 
1135 	if (i && dev->fb_vbase_cap == NULL)
1136 		return -EINVAL;
1137 
1138 	if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1139 		dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1140 		return -EINVAL;
1141 	}
1142 
1143 	if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1144 		return -EBUSY;
1145 	dev->overlay_cap_owner = i ? fh : NULL;
1146 	return 0;
1147 }
1148 
1149 int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1150 				struct v4l2_framebuffer *a)
1151 {
1152 	struct vivid_dev *dev = video_drvdata(file);
1153 
1154 	*a = dev->fb_cap;
1155 	a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1156 			V4L2_FBUF_CAP_LIST_CLIPPING;
1157 	a->flags = V4L2_FBUF_FLAG_PRIMARY;
1158 	a->fmt.field = V4L2_FIELD_NONE;
1159 	a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1160 	a->fmt.priv = 0;
1161 	return 0;
1162 }
1163 
1164 int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1165 				const struct v4l2_framebuffer *a)
1166 {
1167 	struct vivid_dev *dev = video_drvdata(file);
1168 	const struct vivid_fmt *fmt;
1169 
1170 	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1171 		return -EPERM;
1172 
1173 	if (dev->overlay_cap_owner)
1174 		return -EBUSY;
1175 
1176 	if (a->base == NULL) {
1177 		dev->fb_cap.base = NULL;
1178 		dev->fb_vbase_cap = NULL;
1179 		return 0;
1180 	}
1181 
1182 	if (a->fmt.width < 48 || a->fmt.height < 32)
1183 		return -EINVAL;
1184 	fmt = vivid_get_format(dev, a->fmt.pixelformat);
1185 	if (!fmt || !fmt->can_do_overlay)
1186 		return -EINVAL;
1187 	if (a->fmt.bytesperline < (a->fmt.width * fmt->depth) / 8)
1188 		return -EINVAL;
1189 	if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1190 		return -EINVAL;
1191 
1192 	dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1193 	dev->fb_cap = *a;
1194 	dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1195 				    -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1196 	dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1197 				   -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1198 	return 0;
1199 }
1200 
1201 static const struct v4l2_audio vivid_audio_inputs[] = {
1202 	{ 0, "TV", V4L2_AUDCAP_STEREO },
1203 	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
1204 };
1205 
1206 int vidioc_enum_input(struct file *file, void *priv,
1207 				struct v4l2_input *inp)
1208 {
1209 	struct vivid_dev *dev = video_drvdata(file);
1210 
1211 	if (inp->index >= dev->num_inputs)
1212 		return -EINVAL;
1213 
1214 	inp->type = V4L2_INPUT_TYPE_CAMERA;
1215 	switch (dev->input_type[inp->index]) {
1216 	case WEBCAM:
1217 		snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1218 				dev->input_name_counter[inp->index]);
1219 		inp->capabilities = 0;
1220 		break;
1221 	case TV:
1222 		snprintf(inp->name, sizeof(inp->name), "TV %u",
1223 				dev->input_name_counter[inp->index]);
1224 		inp->type = V4L2_INPUT_TYPE_TUNER;
1225 		inp->std = V4L2_STD_ALL;
1226 		if (dev->has_audio_inputs)
1227 			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1228 		inp->capabilities = V4L2_IN_CAP_STD;
1229 		break;
1230 	case SVID:
1231 		snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1232 				dev->input_name_counter[inp->index]);
1233 		inp->std = V4L2_STD_ALL;
1234 		if (dev->has_audio_inputs)
1235 			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1236 		inp->capabilities = V4L2_IN_CAP_STD;
1237 		break;
1238 	case HDMI:
1239 		snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1240 				dev->input_name_counter[inp->index]);
1241 		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1242 		if (dev->edid_blocks == 0 ||
1243 		    dev->dv_timings_signal_mode == NO_SIGNAL)
1244 			inp->status |= V4L2_IN_ST_NO_SIGNAL;
1245 		else if (dev->dv_timings_signal_mode == NO_LOCK ||
1246 			 dev->dv_timings_signal_mode == OUT_OF_RANGE)
1247 			inp->status |= V4L2_IN_ST_NO_H_LOCK;
1248 		break;
1249 	}
1250 	if (dev->sensor_hflip)
1251 		inp->status |= V4L2_IN_ST_HFLIP;
1252 	if (dev->sensor_vflip)
1253 		inp->status |= V4L2_IN_ST_VFLIP;
1254 	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1255 		if (dev->std_signal_mode == NO_SIGNAL) {
1256 			inp->status |= V4L2_IN_ST_NO_SIGNAL;
1257 		} else if (dev->std_signal_mode == NO_LOCK) {
1258 			inp->status |= V4L2_IN_ST_NO_H_LOCK;
1259 		} else if (vivid_is_tv_cap(dev)) {
1260 			switch (tpg_g_quality(&dev->tpg)) {
1261 			case TPG_QUAL_GRAY:
1262 				inp->status |= V4L2_IN_ST_COLOR_KILL;
1263 				break;
1264 			case TPG_QUAL_NOISE:
1265 				inp->status |= V4L2_IN_ST_NO_H_LOCK;
1266 				break;
1267 			default:
1268 				break;
1269 			}
1270 		}
1271 	}
1272 	return 0;
1273 }
1274 
1275 int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1276 {
1277 	struct vivid_dev *dev = video_drvdata(file);
1278 
1279 	*i = dev->input;
1280 	return 0;
1281 }
1282 
1283 int vidioc_s_input(struct file *file, void *priv, unsigned i)
1284 {
1285 	struct vivid_dev *dev = video_drvdata(file);
1286 	struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
1287 	unsigned brightness;
1288 
1289 	if (i >= dev->num_inputs)
1290 		return -EINVAL;
1291 
1292 	if (i == dev->input)
1293 		return 0;
1294 
1295 	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1296 		return -EBUSY;
1297 
1298 	dev->input = i;
1299 	dev->vid_cap_dev.tvnorms = 0;
1300 	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1301 		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1302 		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1303 	}
1304 	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1305 	vivid_update_format_cap(dev, false);
1306 
1307 	if (dev->colorspace) {
1308 		switch (dev->input_type[i]) {
1309 		case WEBCAM:
1310 			v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
1311 			break;
1312 		case TV:
1313 		case SVID:
1314 			v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
1315 			break;
1316 		case HDMI:
1317 			if (bt->standards & V4L2_DV_BT_STD_CEA861) {
1318 				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1319 					v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
1320 				else
1321 					v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
1322 			} else {
1323 				v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
1324 			}
1325 			break;
1326 		}
1327 	}
1328 
1329 	/*
1330 	 * Modify the brightness range depending on the input.
1331 	 * This makes it easy to use vivid to test if applications can
1332 	 * handle control range modifications and is also how this is
1333 	 * typically used in practice as different inputs may be hooked
1334 	 * up to different receivers with different control ranges.
1335 	 */
1336 	brightness = 128 * i + dev->input_brightness[i];
1337 	v4l2_ctrl_modify_range(dev->brightness,
1338 			128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1339 	v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1340 	return 0;
1341 }
1342 
1343 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1344 {
1345 	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1346 		return -EINVAL;
1347 	*vin = vivid_audio_inputs[vin->index];
1348 	return 0;
1349 }
1350 
1351 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1352 {
1353 	struct vivid_dev *dev = video_drvdata(file);
1354 
1355 	if (!vivid_is_sdtv_cap(dev))
1356 		return -EINVAL;
1357 	*vin = vivid_audio_inputs[dev->tv_audio_input];
1358 	return 0;
1359 }
1360 
1361 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1362 {
1363 	struct vivid_dev *dev = video_drvdata(file);
1364 
1365 	if (!vivid_is_sdtv_cap(dev))
1366 		return -EINVAL;
1367 	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1368 		return -EINVAL;
1369 	dev->tv_audio_input = vin->index;
1370 	return 0;
1371 }
1372 
1373 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1374 {
1375 	struct vivid_dev *dev = video_drvdata(file);
1376 
1377 	if (vf->tuner != 0)
1378 		return -EINVAL;
1379 	vf->frequency = dev->tv_freq;
1380 	return 0;
1381 }
1382 
1383 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1384 {
1385 	struct vivid_dev *dev = video_drvdata(file);
1386 
1387 	if (vf->tuner != 0)
1388 		return -EINVAL;
1389 	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1390 	if (vivid_is_tv_cap(dev))
1391 		vivid_update_quality(dev);
1392 	return 0;
1393 }
1394 
1395 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1396 {
1397 	struct vivid_dev *dev = video_drvdata(file);
1398 
1399 	if (vt->index != 0)
1400 		return -EINVAL;
1401 	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1402 		return -EINVAL;
1403 	dev->tv_audmode = vt->audmode;
1404 	return 0;
1405 }
1406 
1407 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1408 {
1409 	struct vivid_dev *dev = video_drvdata(file);
1410 	enum tpg_quality qual;
1411 
1412 	if (vt->index != 0)
1413 		return -EINVAL;
1414 
1415 	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1416 			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1417 	vt->audmode = dev->tv_audmode;
1418 	vt->rangelow = MIN_TV_FREQ;
1419 	vt->rangehigh = MAX_TV_FREQ;
1420 	qual = vivid_get_quality(dev, &vt->afc);
1421 	if (qual == TPG_QUAL_COLOR)
1422 		vt->signal = 0xffff;
1423 	else if (qual == TPG_QUAL_GRAY)
1424 		vt->signal = 0x8000;
1425 	else
1426 		vt->signal = 0;
1427 	if (qual == TPG_QUAL_NOISE) {
1428 		vt->rxsubchans = 0;
1429 	} else if (qual == TPG_QUAL_GRAY) {
1430 		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1431 	} else {
1432 		unsigned channel_nr = dev->tv_freq / (6 * 16);
1433 		unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
1434 
1435 		switch (channel_nr % options) {
1436 		case 0:
1437 			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1438 			break;
1439 		case 1:
1440 			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1441 			break;
1442 		case 2:
1443 			if (dev->std_cap & V4L2_STD_NTSC_M)
1444 				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1445 			else
1446 				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1447 			break;
1448 		case 3:
1449 			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1450 			break;
1451 		}
1452 	}
1453 	strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
1454 	return 0;
1455 }
1456 
1457 /* Must remain in sync with the vivid_ctrl_standard_strings array */
1458 const v4l2_std_id vivid_standard[] = {
1459 	V4L2_STD_NTSC_M,
1460 	V4L2_STD_NTSC_M_JP,
1461 	V4L2_STD_NTSC_M_KR,
1462 	V4L2_STD_NTSC_443,
1463 	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1464 	V4L2_STD_PAL_I,
1465 	V4L2_STD_PAL_DK,
1466 	V4L2_STD_PAL_M,
1467 	V4L2_STD_PAL_N,
1468 	V4L2_STD_PAL_Nc,
1469 	V4L2_STD_PAL_60,
1470 	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1471 	V4L2_STD_SECAM_DK,
1472 	V4L2_STD_SECAM_L,
1473 	V4L2_STD_SECAM_LC,
1474 	V4L2_STD_UNKNOWN
1475 };
1476 
1477 /* Must remain in sync with the vivid_standard array */
1478 const char * const vivid_ctrl_standard_strings[] = {
1479 	"NTSC-M",
1480 	"NTSC-M-JP",
1481 	"NTSC-M-KR",
1482 	"NTSC-443",
1483 	"PAL-BGH",
1484 	"PAL-I",
1485 	"PAL-DK",
1486 	"PAL-M",
1487 	"PAL-N",
1488 	"PAL-Nc",
1489 	"PAL-60",
1490 	"SECAM-BGH",
1491 	"SECAM-DK",
1492 	"SECAM-L",
1493 	"SECAM-Lc",
1494 	NULL,
1495 };
1496 
1497 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1498 {
1499 	struct vivid_dev *dev = video_drvdata(file);
1500 
1501 	if (!vivid_is_sdtv_cap(dev))
1502 		return -ENODATA;
1503 	if (dev->std_signal_mode == NO_SIGNAL ||
1504 	    dev->std_signal_mode == NO_LOCK) {
1505 		*id = V4L2_STD_UNKNOWN;
1506 		return 0;
1507 	}
1508 	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1509 		*id = V4L2_STD_UNKNOWN;
1510 	} else if (dev->std_signal_mode == CURRENT_STD) {
1511 		*id = dev->std_cap;
1512 	} else if (dev->std_signal_mode == SELECTED_STD) {
1513 		*id = dev->query_std;
1514 	} else {
1515 		*id = vivid_standard[dev->query_std_last];
1516 		dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1523 {
1524 	struct vivid_dev *dev = video_drvdata(file);
1525 
1526 	if (!vivid_is_sdtv_cap(dev))
1527 		return -ENODATA;
1528 	if (dev->std_cap == id)
1529 		return 0;
1530 	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1531 		return -EBUSY;
1532 	dev->std_cap = id;
1533 	vivid_update_format_cap(dev, false);
1534 	return 0;
1535 }
1536 
1537 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1538 				    struct v4l2_dv_timings *timings)
1539 {
1540 	struct vivid_dev *dev = video_drvdata(file);
1541 
1542 	if (!vivid_is_hdmi_cap(dev))
1543 		return -ENODATA;
1544 	if (vb2_is_busy(&dev->vb_vid_cap_q))
1545 		return -EBUSY;
1546 	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
1547 				0, NULL, NULL))
1548 		return -EINVAL;
1549 	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0))
1550 		return 0;
1551 	dev->dv_timings_cap = *timings;
1552 	vivid_update_format_cap(dev, false);
1553 	return 0;
1554 }
1555 
1556 int vidioc_query_dv_timings(struct file *file, void *_fh,
1557 				    struct v4l2_dv_timings *timings)
1558 {
1559 	struct vivid_dev *dev = video_drvdata(file);
1560 
1561 	if (!vivid_is_hdmi_cap(dev))
1562 		return -ENODATA;
1563 	if (dev->dv_timings_signal_mode == NO_SIGNAL ||
1564 	    dev->edid_blocks == 0)
1565 		return -ENOLINK;
1566 	if (dev->dv_timings_signal_mode == NO_LOCK)
1567 		return -ENOLCK;
1568 	if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
1569 		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1570 		return -ERANGE;
1571 	}
1572 	if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
1573 		*timings = dev->dv_timings_cap;
1574 	} else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
1575 		*timings = v4l2_dv_timings_presets[dev->query_dv_timings];
1576 	} else {
1577 		*timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
1578 		dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
1579 						dev->query_dv_timings_size;
1580 	}
1581 	return 0;
1582 }
1583 
1584 int vidioc_s_edid(struct file *file, void *_fh,
1585 			 struct v4l2_edid *edid)
1586 {
1587 	struct vivid_dev *dev = video_drvdata(file);
1588 
1589 	memset(edid->reserved, 0, sizeof(edid->reserved));
1590 	if (edid->pad >= dev->num_inputs)
1591 		return -EINVAL;
1592 	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1593 		return -EINVAL;
1594 	if (edid->blocks == 0) {
1595 		dev->edid_blocks = 0;
1596 		return 0;
1597 	}
1598 	if (edid->blocks > dev->edid_max_blocks) {
1599 		edid->blocks = dev->edid_max_blocks;
1600 		return -E2BIG;
1601 	}
1602 	dev->edid_blocks = edid->blocks;
1603 	memcpy(dev->edid, edid->edid, edid->blocks * 128);
1604 	return 0;
1605 }
1606 
1607 int vidioc_enum_framesizes(struct file *file, void *fh,
1608 					 struct v4l2_frmsizeenum *fsize)
1609 {
1610 	struct vivid_dev *dev = video_drvdata(file);
1611 
1612 	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1613 		return -EINVAL;
1614 	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
1615 		return -EINVAL;
1616 	if (vivid_is_webcam(dev)) {
1617 		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1618 			return -EINVAL;
1619 		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1620 		fsize->discrete = webcam_sizes[fsize->index];
1621 		return 0;
1622 	}
1623 	if (fsize->index)
1624 		return -EINVAL;
1625 	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1626 	fsize->stepwise.min_width = MIN_WIDTH;
1627 	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1628 	fsize->stepwise.step_width = 2;
1629 	fsize->stepwise.min_height = MIN_HEIGHT;
1630 	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1631 	fsize->stepwise.step_height = 2;
1632 	return 0;
1633 }
1634 
1635 /* timeperframe is arbitrary and continuous */
1636 int vidioc_enum_frameintervals(struct file *file, void *priv,
1637 					     struct v4l2_frmivalenum *fival)
1638 {
1639 	struct vivid_dev *dev = video_drvdata(file);
1640 	const struct vivid_fmt *fmt;
1641 	int i;
1642 
1643 	fmt = vivid_get_format(dev, fival->pixel_format);
1644 	if (!fmt)
1645 		return -EINVAL;
1646 
1647 	if (!vivid_is_webcam(dev)) {
1648 		static const struct v4l2_fract step = { 1, 1 };
1649 
1650 		if (fival->index)
1651 			return -EINVAL;
1652 		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1653 			return -EINVAL;
1654 		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1655 			return -EINVAL;
1656 		fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
1657 		fival->stepwise.min = tpf_min;
1658 		fival->stepwise.max = tpf_max;
1659 		fival->stepwise.step = step;
1660 		return 0;
1661 	}
1662 
1663 	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1664 		if (fival->width == webcam_sizes[i].width &&
1665 		    fival->height == webcam_sizes[i].height)
1666 			break;
1667 	if (i == ARRAY_SIZE(webcam_sizes))
1668 		return -EINVAL;
1669 	if (fival->index >= 2 * (3 - i))
1670 		return -EINVAL;
1671 	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1672 	fival->discrete = webcam_intervals[fival->index];
1673 	return 0;
1674 }
1675 
1676 int vivid_vid_cap_g_parm(struct file *file, void *priv,
1677 			  struct v4l2_streamparm *parm)
1678 {
1679 	struct vivid_dev *dev = video_drvdata(file);
1680 
1681 	if (parm->type != (dev->multiplanar ?
1682 			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1683 			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
1684 		return -EINVAL;
1685 
1686 	parm->parm.capture.capability   = V4L2_CAP_TIMEPERFRAME;
1687 	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1688 	parm->parm.capture.readbuffers  = 1;
1689 	return 0;
1690 }
1691 
1692 #define FRACT_CMP(a, OP, b)	\
1693 	((u64)(a).numerator * (b).denominator  OP  (u64)(b).numerator * (a).denominator)
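/*
 * FRACT_CMP sketch: the macro cross-multiplies so fractions can be
 * compared without floating point or rounding, e.g.
 *
 *   FRACT_CMP(1/25, >=, 1/30)  ->  1 * 30 >= 1 * 25  ->  true
 *
 * i.e. a 1/25 s frame interval is the longer (slower) of the two.
 */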
1694 
1695 int vivid_vid_cap_s_parm(struct file *file, void *priv,
1696 			  struct v4l2_streamparm *parm)
1697 {
1698 	struct vivid_dev *dev = video_drvdata(file);
1699 	unsigned ival_sz = 2 * (3 - dev->webcam_size_idx);
1700 	struct v4l2_fract tpf;
1701 	unsigned i;
1702 
1703 	if (parm->type != (dev->multiplanar ?
1704 			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1705 			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
1706 		return -EINVAL;
1707 	if (!vivid_is_webcam(dev))
1708 		return vivid_vid_cap_g_parm(file, priv, parm);
1709 
1710 	tpf = parm->parm.capture.timeperframe;
1711 
1712 	if (tpf.denominator == 0)
1713 		tpf = webcam_intervals[ival_sz - 1];
1714 	for (i = 0; i < ival_sz; i++)
1715 		if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
1716 			break;
1717 	if (i == ival_sz)
1718 		i = ival_sz - 1;
1719 	dev->webcam_ival_idx = i;
1720 	tpf = webcam_intervals[dev->webcam_ival_idx];
1721 	tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
1722 	tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
1723 
1724 	/* resync the thread's timings */
1725 	dev->cap_seq_resync = true;
1726 	dev->timeperframe_vid_cap = tpf;
1727 	parm->parm.capture.timeperframe = tpf;
1728 	parm->parm.capture.readbuffers  = 1;
1729 	return 0;
1730 }
1731