1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * vivid-vid-cap.c - video capture support functions.
4 *
5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 */
7
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/vmalloc.h>
12 #include <linux/videodev2.h>
13 #include <linux/v4l2-dv-timings.h>
14 #include <media/v4l2-common.h>
15 #include <media/v4l2-event.h>
16 #include <media/v4l2-dv-timings.h>
17 #include <media/v4l2-rect.h>
18
19 #include "vivid-core.h"
20 #include "vivid-vid-common.h"
21 #include "vivid-kthread-cap.h"
22 #include "vivid-vid-cap.h"
23
24 static const struct vivid_fmt formats_ovl[] = {
25 {
26 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
27 .vdownsampling = { 1 },
28 .bit_depth = { 16 },
29 .planes = 1,
30 .buffers = 1,
31 },
32 {
33 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
34 .vdownsampling = { 1 },
35 .bit_depth = { 16 },
36 .planes = 1,
37 .buffers = 1,
38 },
39 {
40 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
41 .vdownsampling = { 1 },
42 .bit_depth = { 16 },
43 .planes = 1,
44 .buffers = 1,
45 },
46 };
47
48 /* The number of discrete webcam framesizes */
49 #define VIVID_WEBCAM_SIZES 6
50 /* The number of discrete webcam frameintervals */
51 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
52
53 /* Sizes must be in increasing order */
54 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
55 { 320, 180 },
56 { 640, 360 },
57 { 640, 480 },
58 { 1280, 720 },
59 { 1920, 1080 },
60 { 3840, 2160 },
61 };
62
63 /*
64 * Intervals must be listed from longest to shortest frame interval (i.e. in
65 * order of increasing frame rate); there must be twice as many elements in this array as there are in webcam_sizes.
66 */
67 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
68 { 1, 1 },
69 { 1, 2 },
70 { 1, 4 },
71 { 1, 5 },
72 { 1, 10 },
73 { 2, 25 },
74 { 1, 15 },
75 { 1, 25 },
76 { 1, 30 },
77 { 1, 40 },
78 { 1, 50 },
79 { 1, 60 },
80 };
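/*
 * Note: for webcam size index i only the first 2 * (VIVID_WEBCAM_SIZES - i)
 * intervals are available (see vidioc_enum_frameintervals() and
 * vivid_vid_cap_s_parm() below), so the larger sizes only support the
 * slower rates, e.g. 3840x2160 is limited to 1 and 2 frames per second.
 */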
81
82 static int vid_cap_queue_setup(struct vb2_queue *vq,
83 unsigned *nbuffers, unsigned *nplanes,
84 unsigned sizes[], struct device *alloc_devs[])
85 {
86 struct vivid_dev *dev = vb2_get_drv_priv(vq);
87 unsigned buffers = tpg_g_buffers(&dev->tpg);
88 unsigned h = dev->fmt_cap_rect.height;
89 unsigned p;
90
91 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
92 /*
93 * You cannot use read() with FIELD_ALTERNATE since the field
94 * information (TOP/BOTTOM) cannot be passed back to the user.
95 */
96 if (vb2_fileio_is_active(vq))
97 return -EINVAL;
98 }
99
100 if (dev->queue_setup_error) {
101 /*
102 * Error injection: test what happens if queue_setup() returns
103 * an error.
104 */
105 dev->queue_setup_error = false;
106 return -EINVAL;
107 }
108 if (*nplanes) {
109 /*
110 * Check if the number of requested planes matches
111 * the number of buffers in the current format. Mixing them is not allowed.
112 */
113 if (*nplanes != buffers)
114 return -EINVAL;
115 for (p = 0; p < buffers; p++) {
116 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
117 dev->fmt_cap->data_offset[p])
118 return -EINVAL;
119 }
120 } else {
121 for (p = 0; p < buffers; p++)
122 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
123 dev->fmt_cap->vdownsampling[p] +
124 dev->fmt_cap->data_offset[p];
125 }
126
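/*
 * Make sure the queue ends up with at least two buffers in total,
 * counting any buffers that were already allocated.
 */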
127 if (vq->num_buffers + *nbuffers < 2)
128 *nbuffers = 2 - vq->num_buffers;
129
130 *nplanes = buffers;
131
132 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
133 for (p = 0; p < buffers; p++)
134 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
135
136 return 0;
137 }
138
139 static int vid_cap_buf_prepare(struct vb2_buffer *vb)
140 {
141 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
142 unsigned long size;
143 unsigned buffers = tpg_g_buffers(&dev->tpg);
144 unsigned p;
145
146 dprintk(dev, 1, "%s\n", __func__);
147
148 if (WARN_ON(NULL == dev->fmt_cap))
149 return -EINVAL;
150
151 if (dev->buf_prepare_error) {
152 /*
153 * Error injection: test what happens if buf_prepare() returns
154 * an error.
155 */
156 dev->buf_prepare_error = false;
157 return -EINVAL;
158 }
159 for (p = 0; p < buffers; p++) {
160 size = (tpg_g_line_width(&dev->tpg, p) *
161 dev->fmt_cap_rect.height) /
162 dev->fmt_cap->vdownsampling[p] +
163 dev->fmt_cap->data_offset[p];
164
165 if (vb2_plane_size(vb, p) < size) {
166 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
167 __func__, p, vb2_plane_size(vb, p), size);
168 return -EINVAL;
169 }
170
171 vb2_set_plane_payload(vb, p, size);
172 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
173 }
174
175 return 0;
176 }
177
178 static void vid_cap_buf_finish(struct vb2_buffer *vb)
179 {
180 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
181 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
182 struct v4l2_timecode *tc = &vbuf->timecode;
183 unsigned fps = 25;
184 unsigned seq = vbuf->sequence;
185
186 if (!vivid_is_sdtv_cap(dev))
187 return;
188
189 /*
190 * Set the timecode. Rarely used, so it is interesting to
191 * test this.
192 */
193 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
194 if (dev->std_cap[dev->input] & V4L2_STD_525_60)
195 fps = 30;
196 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
197 tc->flags = 0;
198 tc->frames = seq % fps;
199 tc->seconds = (seq / fps) % 60;
200 tc->minutes = (seq / (60 * fps)) % 60;
201 tc->hours = (seq / (60 * 60 * fps)) % 24;
202 }
203
204 static void vid_cap_buf_queue(struct vb2_buffer *vb)
205 {
206 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
207 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
208 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
209
210 dprintk(dev, 1, "%s\n", __func__);
211
212 spin_lock(&dev->slock);
213 list_add_tail(&buf->list, &dev->vid_cap_active);
214 spin_unlock(&dev->slock);
215 }
216
217 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
218 {
219 struct vivid_dev *dev = vb2_get_drv_priv(vq);
220 unsigned i;
221 int err;
222
223 if (vb2_is_streaming(&dev->vb_vid_out_q))
224 dev->can_loop_video = vivid_vid_can_loop(dev);
225
226 dev->vid_cap_seq_count = 0;
227 dprintk(dev, 1, "%s\n", __func__);
228 for (i = 0; i < VIDEO_MAX_FRAME; i++)
229 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
230 if (dev->start_streaming_error) {
231 dev->start_streaming_error = false;
232 err = -EINVAL;
233 } else {
234 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
235 }
236 if (err) {
237 struct vivid_buffer *buf, *tmp;
238
239 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
240 list_del(&buf->list);
241 vb2_buffer_done(&buf->vb.vb2_buf,
242 VB2_BUF_STATE_QUEUED);
243 }
244 }
245 return err;
246 }
247
248 /* abort streaming and wait for last buffer */
249 static void vid_cap_stop_streaming(struct vb2_queue *vq)
250 {
251 struct vivid_dev *dev = vb2_get_drv_priv(vq);
252
253 dprintk(dev, 1, "%s\n", __func__);
254 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
255 dev->can_loop_video = false;
256 }
257
258 static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
259 {
260 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
261
262 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
263 }
264
265 const struct vb2_ops vivid_vid_cap_qops = {
266 .queue_setup = vid_cap_queue_setup,
267 .buf_prepare = vid_cap_buf_prepare,
268 .buf_finish = vid_cap_buf_finish,
269 .buf_queue = vid_cap_buf_queue,
270 .start_streaming = vid_cap_start_streaming,
271 .stop_streaming = vid_cap_stop_streaming,
272 .buf_request_complete = vid_cap_buf_request_complete,
273 .wait_prepare = vb2_ops_wait_prepare,
274 .wait_finish = vb2_ops_wait_finish,
275 };
276
277 /*
278 * Determine the 'picture' quality based on the current TV frequency: either
279 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
280 * signal or NOISE for no signal.
281 */
282 void vivid_update_quality(struct vivid_dev *dev)
283 {
284 unsigned freq_modulus;
285
286 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
287 /*
288 * The 'noise' will only be replaced by the actual video
289 * if the output video matches the input video settings.
290 */
291 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
292 return;
293 }
294 if (vivid_is_hdmi_cap(dev) &&
295 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
296 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
297 return;
298 }
299 if (vivid_is_sdtv_cap(dev) &&
300 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
301 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
302 return;
303 }
304 if (!vivid_is_tv_cap(dev)) {
305 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
306 return;
307 }
308
309 /*
310 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
311 * From +/- 0.25 MHz around the channel there is color, and from
312 * +/- 1 MHz there is grayscale (chroma is lost).
313 * Everywhere else it is just noise.
314 */
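/*
 * Worked example (tv_freq is in units of 1/16 MHz = 62.5 kHz):
 * at 55.25 MHz tv_freq = 884 and freq_modulus = (884 - 676) % 96 = 16,
 * i.e. exactly on a channel, which falls inside the 12..20 'color'
 * window below; offsets of more than 1 MHz (modulus > 32) give noise.
 */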
315 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
316 if (freq_modulus > 2 * 16) {
317 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
318 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
319 return;
320 }
321 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
322 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
323 else
324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
325 }
326
327 /*
328 * Get the current picture quality and the associated afc value.
329 */
330 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
331 {
332 unsigned freq_modulus;
333
334 if (afc)
335 *afc = 0;
336 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
337 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
338 return tpg_g_quality(&dev->tpg);
339
340 /*
341 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
342 * From +/- 0.25 MHz around the channel there is color, and from
343 * +/- 1 MHz there is grayscale (chroma is lost).
344 * Everywhere else it is just gray.
345 */
346 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
347 if (afc)
348 *afc = freq_modulus - 1 * 16;
349 return TPG_QUAL_GRAY;
350 }
351
352 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
353 {
354 if (vivid_is_sdtv_cap(dev))
355 return dev->std_aspect_ratio[dev->input];
356
357 if (vivid_is_hdmi_cap(dev))
358 return dev->dv_timings_aspect_ratio[dev->input];
359
360 return TPG_VIDEO_ASPECT_IMAGE;
361 }
362
363 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
364 {
365 if (vivid_is_sdtv_cap(dev))
366 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
367 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
368
369 if (vivid_is_hdmi_cap(dev) &&
370 dev->src_rect.width == 720 && dev->src_rect.height <= 576)
371 return dev->src_rect.height == 480 ?
372 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
373
374 return TPG_PIXEL_ASPECT_SQUARE;
375 }
376
377 /*
378 * Called whenever the format has to be reset, which can occur when
379 * changing inputs, standards, timings, etc.
380 */
381 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
382 {
383 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
384 unsigned size;
385 u64 pixelclock;
386
387 switch (dev->input_type[dev->input]) {
388 case WEBCAM:
389 default:
390 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
391 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
392 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
393 dev->field_cap = V4L2_FIELD_NONE;
394 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
395 break;
396 case TV:
397 case SVID:
398 dev->field_cap = dev->tv_field_cap;
399 dev->src_rect.width = 720;
400 if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
401 dev->src_rect.height = 480;
402 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
403 dev->service_set_cap = V4L2_SLICED_CAPTION_525;
404 } else {
405 dev->src_rect.height = 576;
406 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
407 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
408 }
409 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
410 break;
411 case HDMI:
412 dev->src_rect.width = bt->width;
413 dev->src_rect.height = bt->height;
414 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
415 if (dev->reduced_fps && can_reduce_fps(bt)) {
416 pixelclock = div_u64(bt->pixelclock * 1000, 1001);
417 bt->flags |= V4L2_DV_FL_REDUCED_FPS;
418 } else {
419 pixelclock = bt->pixelclock;
420 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
421 }
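/*
 * The frame period is the total frame size (including blanking) divided
 * by the pixel clock; both values are divided by 100 before being stored
 * in the 32-bit v4l2_fract fields. E.g. 1080p60 (2200x1125 total pixels,
 * 148.5 MHz pixel clock): 24750 / 1485000 = 1/60 s.
 */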
422 dev->timeperframe_vid_cap = (struct v4l2_fract) {
423 size / 100, (u32)pixelclock / 100
424 };
425 if (bt->interlaced)
426 dev->field_cap = V4L2_FIELD_ALTERNATE;
427 else
428 dev->field_cap = V4L2_FIELD_NONE;
429
430 /*
431 * We can be called from within s_ctrl; in that case we can't
432 * set or get controls. Luckily we don't need to in that case.
433 */
434 if (keep_controls || !dev->colorspace)
435 break;
436 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
437 if (bt->width == 720 && bt->height <= 576)
438 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
439 else
440 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
441 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
442 } else {
443 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
444 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
445 }
446 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
447 break;
448 }
449 vfree(dev->bitmap_cap);
450 dev->bitmap_cap = NULL;
451 vivid_update_quality(dev);
452 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
453 dev->crop_cap = dev->src_rect;
454 dev->crop_bounds_cap = dev->src_rect;
455 dev->compose_cap = dev->crop_cap;
456 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
457 dev->compose_cap.height /= 2;
458 dev->fmt_cap_rect = dev->compose_cap;
459 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
460 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
461 tpg_update_mv_step(&dev->tpg);
462 }
463
464 /* Map the field to something that is valid for the current input */
465 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
466 {
467 if (vivid_is_sdtv_cap(dev)) {
468 switch (field) {
469 case V4L2_FIELD_INTERLACED_TB:
470 case V4L2_FIELD_INTERLACED_BT:
471 case V4L2_FIELD_SEQ_TB:
472 case V4L2_FIELD_SEQ_BT:
473 case V4L2_FIELD_TOP:
474 case V4L2_FIELD_BOTTOM:
475 case V4L2_FIELD_ALTERNATE:
476 return field;
477 case V4L2_FIELD_INTERLACED:
478 default:
479 return V4L2_FIELD_INTERLACED;
480 }
481 }
482 if (vivid_is_hdmi_cap(dev))
483 return dev->dv_timings_cap[dev->input].bt.interlaced ?
484 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
485 return V4L2_FIELD_NONE;
486 }
487
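/*
 * The helpers below return the colorimetry reported by the capture side:
 * normally the TPG values, but when video loopback is active (and the
 * input is neither a webcam nor a TV) the colorimetry of the output side
 * is reported instead.
 */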
488 static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
489 {
490 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
491 return tpg_g_colorspace(&dev->tpg);
492 return dev->colorspace_out;
493 }
494
495 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
496 {
497 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
498 return tpg_g_xfer_func(&dev->tpg);
499 return dev->xfer_func_out;
500 }
501
502 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
503 {
504 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
505 return tpg_g_ycbcr_enc(&dev->tpg);
506 return dev->ycbcr_enc_out;
507 }
508
509 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
510 {
511 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
512 return tpg_g_hsv_enc(&dev->tpg);
513 return dev->hsv_enc_out;
514 }
515
516 static unsigned vivid_quantization_cap(struct vivid_dev *dev)
517 {
518 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
519 return tpg_g_quantization(&dev->tpg);
520 return dev->quantization_out;
521 }
522
523 int vivid_g_fmt_vid_cap(struct file *file, void *priv,
524 struct v4l2_format *f)
525 {
526 struct vivid_dev *dev = video_drvdata(file);
527 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
528 unsigned p;
529
530 mp->width = dev->fmt_cap_rect.width;
531 mp->height = dev->fmt_cap_rect.height;
532 mp->field = dev->field_cap;
533 mp->pixelformat = dev->fmt_cap->fourcc;
534 mp->colorspace = vivid_colorspace_cap(dev);
535 mp->xfer_func = vivid_xfer_func_cap(dev);
536 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
537 mp->hsv_enc = vivid_hsv_enc_cap(dev);
538 else
539 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
540 mp->quantization = vivid_quantization_cap(dev);
541 mp->num_planes = dev->fmt_cap->buffers;
542 for (p = 0; p < mp->num_planes; p++) {
543 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
544 mp->plane_fmt[p].sizeimage =
545 (tpg_g_line_width(&dev->tpg, p) * mp->height) /
546 dev->fmt_cap->vdownsampling[p] +
547 dev->fmt_cap->data_offset[p];
548 }
549 return 0;
550 }
551
552 int vivid_try_fmt_vid_cap(struct file *file, void *priv,
553 struct v4l2_format *f)
554 {
555 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
556 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
557 struct vivid_dev *dev = video_drvdata(file);
558 const struct vivid_fmt *fmt;
559 unsigned bytesperline, max_bpl;
560 unsigned factor = 1;
561 unsigned w, h;
562 unsigned p;
563 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);
564
565 fmt = vivid_get_format(dev, mp->pixelformat);
566 if (!fmt) {
567 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
568 mp->pixelformat);
569 mp->pixelformat = V4L2_PIX_FMT_YUYV;
570 fmt = vivid_get_format(dev, mp->pixelformat);
571 }
572
573 mp->field = vivid_field_cap(dev, mp->field);
574 if (vivid_is_webcam(dev)) {
575 const struct v4l2_frmsize_discrete *sz =
576 v4l2_find_nearest_size(webcam_sizes,
577 VIVID_WEBCAM_SIZES, width,
578 height, mp->width, mp->height);
579
580 w = sz->width;
581 h = sz->height;
582 } else if (vivid_is_sdtv_cap(dev)) {
583 w = 720;
584 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
585 } else {
586 w = dev->src_rect.width;
587 h = dev->src_rect.height;
588 }
589 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
590 factor = 2;
591 if (vivid_is_webcam(dev) ||
592 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
593 mp->width = w;
594 mp->height = h / factor;
595 } else {
596 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
597
598 v4l2_rect_set_min_size(&r, &vivid_min_rect);
599 v4l2_rect_set_max_size(&r, &vivid_max_rect);
600 if (dev->has_scaler_cap && !dev->has_compose_cap) {
601 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
602
603 v4l2_rect_set_max_size(&r, &max_r);
604 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
605 v4l2_rect_set_max_size(&r, &dev->src_rect);
606 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
607 v4l2_rect_set_min_size(&r, &dev->src_rect);
608 }
609 mp->width = r.width;
610 mp->height = r.height / factor;
611 }
612
613 /* This driver supports custom bytesperline values */
614
615 mp->num_planes = fmt->buffers;
616 for (p = 0; p < fmt->buffers; p++) {
617 /* Calculate the minimum supported bytesperline value */
618 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
619 /* Calculate the maximum supported bytesperline value */
620 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
621
622 if (pfmt[p].bytesperline > max_bpl)
623 pfmt[p].bytesperline = max_bpl;
624 if (pfmt[p].bytesperline < bytesperline)
625 pfmt[p].bytesperline = bytesperline;
626
627 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
628 fmt->vdownsampling[p] + fmt->data_offset[p];
629
630 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
631 }
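/*
 * For formats where several color planes share a single buffer (e.g.
 * NV12: two planes, one buffer) the sizes of the extra planes are added
 * to the sizeimage of the first plane.
 */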
632 for (p = fmt->buffers; p < fmt->planes; p++)
633 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
634 (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
635 (fmt->bit_depth[0] / fmt->vdownsampling[0]);
636
637 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
638 mp->colorspace = vivid_colorspace_cap(dev);
639
640 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
641 mp->xfer_func = vivid_xfer_func_cap(dev);
642
643 if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
644 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
645 mp->hsv_enc = vivid_hsv_enc_cap(dev);
646 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
647 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
648 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
649 } else {
650 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
651 }
652
653 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
654 fmt->color_enc == TGP_COLOR_ENC_RGB) {
655 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
656 mp->quantization = vivid_quantization_cap(dev);
657 } else {
658 mp->quantization = vivid_quantization_cap(dev);
659 }
660
661 memset(mp->reserved, 0, sizeof(mp->reserved));
662 return 0;
663 }
664
665 int vivid_s_fmt_vid_cap(struct file *file, void *priv,
666 struct v4l2_format *f)
667 {
668 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
669 struct vivid_dev *dev = video_drvdata(file);
670 struct v4l2_rect *crop = &dev->crop_cap;
671 struct v4l2_rect *compose = &dev->compose_cap;
672 struct vb2_queue *q = &dev->vb_vid_cap_q;
673 int ret = vivid_try_fmt_vid_cap(file, priv, f);
674 unsigned factor = 1;
675 unsigned p;
676 unsigned i;
677
678 if (ret < 0)
679 return ret;
680
681 if (vb2_is_busy(q)) {
682 dprintk(dev, 1, "%s device busy\n", __func__);
683 return -EBUSY;
684 }
685
686 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
687 dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
688 return -EBUSY;
689 }
690
691 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
692 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
693 factor = 2;
694
695 /* Note: the webcam input doesn't support scaling, cropping or composing */
696
697 if (!vivid_is_webcam(dev) &&
698 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
699 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
700
701 if (dev->has_scaler_cap) {
702 if (dev->has_compose_cap)
703 v4l2_rect_map_inside(compose, &r);
704 else
705 *compose = r;
706 if (dev->has_crop_cap && !dev->has_compose_cap) {
707 struct v4l2_rect min_r = {
708 0, 0,
709 r.width / MAX_ZOOM,
710 factor * r.height / MAX_ZOOM
711 };
712 struct v4l2_rect max_r = {
713 0, 0,
714 r.width * MAX_ZOOM,
715 factor * r.height * MAX_ZOOM
716 };
717
718 v4l2_rect_set_min_size(crop, &min_r);
719 v4l2_rect_set_max_size(crop, &max_r);
720 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
721 } else if (dev->has_crop_cap) {
722 struct v4l2_rect min_r = {
723 0, 0,
724 compose->width / MAX_ZOOM,
725 factor * compose->height / MAX_ZOOM
726 };
727 struct v4l2_rect max_r = {
728 0, 0,
729 compose->width * MAX_ZOOM,
730 factor * compose->height * MAX_ZOOM
731 };
732
733 v4l2_rect_set_min_size(crop, &min_r);
734 v4l2_rect_set_max_size(crop, &max_r);
735 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
736 }
737 } else if (dev->has_crop_cap && !dev->has_compose_cap) {
738 r.height *= factor;
739 v4l2_rect_set_size_to(crop, &r);
740 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
741 r = *crop;
742 r.height /= factor;
743 v4l2_rect_set_size_to(compose, &r);
744 } else if (!dev->has_crop_cap) {
745 v4l2_rect_map_inside(compose, &r);
746 } else {
747 r.height *= factor;
748 v4l2_rect_set_max_size(crop, &r);
749 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
750 compose->top *= factor;
751 compose->height *= factor;
752 v4l2_rect_set_size_to(compose, crop);
753 v4l2_rect_map_inside(compose, &r);
754 compose->top /= factor;
755 compose->height /= factor;
756 }
757 } else if (vivid_is_webcam(dev)) {
758 /* Guaranteed to be a match */
759 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
760 if (webcam_sizes[i].width == mp->width &&
761 webcam_sizes[i].height == mp->height)
762 break;
763 dev->webcam_size_idx = i;
764 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
765 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
766 vivid_update_format_cap(dev, false);
767 } else {
768 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
769
770 v4l2_rect_set_size_to(compose, &r);
771 r.height *= factor;
772 v4l2_rect_set_size_to(crop, &r);
773 }
774
775 dev->fmt_cap_rect.width = mp->width;
776 dev->fmt_cap_rect.height = mp->height;
777 tpg_s_buf_height(&dev->tpg, mp->height);
778 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
779 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
780 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
781 dev->field_cap = mp->field;
782 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
783 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
784 else
785 tpg_s_field(&dev->tpg, dev->field_cap, false);
786 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
787 if (vivid_is_sdtv_cap(dev))
788 dev->tv_field_cap = mp->field;
789 tpg_update_mv_step(&dev->tpg);
790 dev->tpg.colorspace = mp->colorspace;
791 dev->tpg.xfer_func = mp->xfer_func;
792 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
793 dev->tpg.ycbcr_enc = mp->ycbcr_enc;
794 else
795 dev->tpg.hsv_enc = mp->hsv_enc;
796 dev->tpg.quantization = mp->quantization;
797
798 return 0;
799 }
800
801 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
802 struct v4l2_format *f)
803 {
804 struct vivid_dev *dev = video_drvdata(file);
805
806 if (!dev->multiplanar)
807 return -ENOTTY;
808 return vivid_g_fmt_vid_cap(file, priv, f);
809 }
810
811 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
812 struct v4l2_format *f)
813 {
814 struct vivid_dev *dev = video_drvdata(file);
815
816 if (!dev->multiplanar)
817 return -ENOTTY;
818 return vivid_try_fmt_vid_cap(file, priv, f);
819 }
820
821 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
822 struct v4l2_format *f)
823 {
824 struct vivid_dev *dev = video_drvdata(file);
825
826 if (!dev->multiplanar)
827 return -ENOTTY;
828 return vivid_s_fmt_vid_cap(file, priv, f);
829 }
830
831 int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
832 struct v4l2_format *f)
833 {
834 struct vivid_dev *dev = video_drvdata(file);
835
836 if (dev->multiplanar)
837 return -ENOTTY;
838 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
839 }
840
841 int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
842 struct v4l2_format *f)
843 {
844 struct vivid_dev *dev = video_drvdata(file);
845
846 if (dev->multiplanar)
847 return -ENOTTY;
848 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
849 }
850
851 int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
852 struct v4l2_format *f)
853 {
854 struct vivid_dev *dev = video_drvdata(file);
855
856 if (dev->multiplanar)
857 return -ENOTTY;
858 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
859 }
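/*
 * Minimal userspace sketch of format negotiation against the
 * single-planar video node (illustrative only; fd is assumed to be an
 * open handle on a vivid capture device):
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *
 *	fmt.fmt.pix.width = 1280;
 *	fmt.fmt.pix.height = 720;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
 *	fmt.fmt.pix.field = V4L2_FIELD_ANY;
 *	if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
 *		printf("%ux%u, %u bytes per frame\n", fmt.fmt.pix.width,
 *		       fmt.fmt.pix.height, fmt.fmt.pix.sizeimage);
 *
 * On return the driver has adjusted width, height, bytesperline and
 * sizeimage to values it actually supports.
 */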
860
861 int vivid_vid_cap_g_selection(struct file *file, void *priv,
862 struct v4l2_selection *sel)
863 {
864 struct vivid_dev *dev = video_drvdata(file);
865
866 if (!dev->has_crop_cap && !dev->has_compose_cap)
867 return -ENOTTY;
868 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
869 return -EINVAL;
870 if (vivid_is_webcam(dev))
871 return -ENODATA;
872
873 sel->r.left = sel->r.top = 0;
874 switch (sel->target) {
875 case V4L2_SEL_TGT_CROP:
876 if (!dev->has_crop_cap)
877 return -EINVAL;
878 sel->r = dev->crop_cap;
879 break;
880 case V4L2_SEL_TGT_CROP_DEFAULT:
881 case V4L2_SEL_TGT_CROP_BOUNDS:
882 if (!dev->has_crop_cap)
883 return -EINVAL;
884 sel->r = dev->src_rect;
885 break;
886 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
887 if (!dev->has_compose_cap)
888 return -EINVAL;
889 sel->r = vivid_max_rect;
890 break;
891 case V4L2_SEL_TGT_COMPOSE:
892 if (!dev->has_compose_cap)
893 return -EINVAL;
894 sel->r = dev->compose_cap;
895 break;
896 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
897 if (!dev->has_compose_cap)
898 return -EINVAL;
899 sel->r = dev->fmt_cap_rect;
900 break;
901 default:
902 return -EINVAL;
903 }
904 return 0;
905 }
906
907 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
908 {
909 struct vivid_dev *dev = video_drvdata(file);
910 struct v4l2_rect *crop = &dev->crop_cap;
911 struct v4l2_rect *compose = &dev->compose_cap;
912 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
913 int ret;
914
915 if (!dev->has_crop_cap && !dev->has_compose_cap)
916 return -ENOTTY;
917 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
918 return -EINVAL;
919 if (vivid_is_webcam(dev))
920 return -ENODATA;
921
922 switch (s->target) {
923 case V4L2_SEL_TGT_CROP:
924 if (!dev->has_crop_cap)
925 return -EINVAL;
926 ret = vivid_vid_adjust_sel(s->flags, &s->r);
927 if (ret)
928 return ret;
929 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
930 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
931 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
932 s->r.top /= factor;
933 s->r.height /= factor;
934 if (dev->has_scaler_cap) {
935 struct v4l2_rect fmt = dev->fmt_cap_rect;
936 struct v4l2_rect max_rect = {
937 0, 0,
938 s->r.width * MAX_ZOOM,
939 s->r.height * MAX_ZOOM
940 };
941 struct v4l2_rect min_rect = {
942 0, 0,
943 s->r.width / MAX_ZOOM,
944 s->r.height / MAX_ZOOM
945 };
946
947 v4l2_rect_set_min_size(&fmt, &min_rect);
948 if (!dev->has_compose_cap)
949 v4l2_rect_set_max_size(&fmt, &max_rect);
950 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
951 vb2_is_busy(&dev->vb_vid_cap_q))
952 return -EBUSY;
953 if (dev->has_compose_cap) {
954 v4l2_rect_set_min_size(compose, &min_rect);
955 v4l2_rect_set_max_size(compose, &max_rect);
956 v4l2_rect_map_inside(compose, &fmt);
957 }
958 dev->fmt_cap_rect = fmt;
959 tpg_s_buf_height(&dev->tpg, fmt.height);
960 } else if (dev->has_compose_cap) {
961 struct v4l2_rect fmt = dev->fmt_cap_rect;
962
963 v4l2_rect_set_min_size(&fmt, &s->r);
964 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
965 vb2_is_busy(&dev->vb_vid_cap_q))
966 return -EBUSY;
967 dev->fmt_cap_rect = fmt;
968 tpg_s_buf_height(&dev->tpg, fmt.height);
969 v4l2_rect_set_size_to(compose, &s->r);
970 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
971 } else {
972 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
973 vb2_is_busy(&dev->vb_vid_cap_q))
974 return -EBUSY;
975 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
976 v4l2_rect_set_size_to(compose, &s->r);
977 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
978 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
979 }
980 s->r.top *= factor;
981 s->r.height *= factor;
982 *crop = s->r;
983 break;
984 case V4L2_SEL_TGT_COMPOSE:
985 if (!dev->has_compose_cap)
986 return -EINVAL;
987 ret = vivid_vid_adjust_sel(s->flags, &s->r);
988 if (ret)
989 return ret;
990 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
991 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
992 if (dev->has_scaler_cap) {
993 struct v4l2_rect max_rect = {
994 0, 0,
995 dev->src_rect.width * MAX_ZOOM,
996 (dev->src_rect.height / factor) * MAX_ZOOM
997 };
998
999 v4l2_rect_set_max_size(&s->r, &max_rect);
1000 if (dev->has_crop_cap) {
1001 struct v4l2_rect min_rect = {
1002 0, 0,
1003 s->r.width / MAX_ZOOM,
1004 (s->r.height * factor) / MAX_ZOOM
1005 };
1006 struct v4l2_rect max_rect = {
1007 0, 0,
1008 s->r.width * MAX_ZOOM,
1009 (s->r.height * factor) * MAX_ZOOM
1010 };
1011
1012 v4l2_rect_set_min_size(crop, &min_rect);
1013 v4l2_rect_set_max_size(crop, &max_rect);
1014 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1015 }
1016 } else if (dev->has_crop_cap) {
1017 s->r.top *= factor;
1018 s->r.height *= factor;
1019 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
1020 v4l2_rect_set_size_to(crop, &s->r);
1021 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1022 s->r.top /= factor;
1023 s->r.height /= factor;
1024 } else {
1025 v4l2_rect_set_size_to(&s->r, &dev->src_rect);
1026 s->r.height /= factor;
1027 }
1028 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
1029 if (dev->bitmap_cap && (compose->width != s->r.width ||
1030 compose->height != s->r.height)) {
1031 vfree(dev->bitmap_cap);
1032 dev->bitmap_cap = NULL;
1033 }
1034 *compose = s->r;
1035 break;
1036 default:
1037 return -EINVAL;
1038 }
1039
1040 tpg_s_crop_compose(&dev->tpg, crop, compose);
1041 return 0;
1042 }
1043
1044 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
1045 int type, struct v4l2_fract *f)
1046 {
1047 struct vivid_dev *dev = video_drvdata(file);
1048
1049 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1050 return -EINVAL;
1051
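/*
 * 11/10 and 54/59 are the usual y/x pixel aspect ratios for 720-pixel-wide
 * NTSC and PAL/SECAM sampling; all other cases use square pixels.
 */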
1052 switch (vivid_get_pixel_aspect(dev)) {
1053 case TPG_PIXEL_ASPECT_NTSC:
1054 f->numerator = 11;
1055 f->denominator = 10;
1056 break;
1057 case TPG_PIXEL_ASPECT_PAL:
1058 f->numerator = 54;
1059 f->denominator = 59;
1060 break;
1061 default:
1062 break;
1063 }
1064 return 0;
1065 }
1066
1067 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
1068 struct v4l2_fmtdesc *f)
1069 {
1070 struct vivid_dev *dev = video_drvdata(file);
1071 const struct vivid_fmt *fmt;
1072
1073 if (dev->multiplanar)
1074 return -ENOTTY;
1075
1076 if (f->index >= ARRAY_SIZE(formats_ovl))
1077 return -EINVAL;
1078
1079 fmt = &formats_ovl[f->index];
1080
1081 f->pixelformat = fmt->fourcc;
1082 return 0;
1083 }
1084
1085 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1086 struct v4l2_format *f)
1087 {
1088 struct vivid_dev *dev = video_drvdata(file);
1089 const struct v4l2_rect *compose = &dev->compose_cap;
1090 struct v4l2_window *win = &f->fmt.win;
1091 unsigned clipcount = win->clipcount;
1092
1093 if (dev->multiplanar)
1094 return -ENOTTY;
1095
1096 win->w.top = dev->overlay_cap_top;
1097 win->w.left = dev->overlay_cap_left;
1098 win->w.width = compose->width;
1099 win->w.height = compose->height;
1100 win->field = dev->overlay_cap_field;
1101 win->clipcount = dev->clipcount_cap;
1102 if (clipcount > dev->clipcount_cap)
1103 clipcount = dev->clipcount_cap;
1104 if (dev->bitmap_cap == NULL)
1105 win->bitmap = NULL;
1106 else if (win->bitmap) {
1107 if (copy_to_user(win->bitmap, dev->bitmap_cap,
1108 ((compose->width + 7) / 8) * compose->height))
1109 return -EFAULT;
1110 }
1111 if (clipcount && win->clips) {
1112 if (copy_to_user(win->clips, dev->clips_cap,
1113 clipcount * sizeof(dev->clips_cap[0])))
1114 return -EFAULT;
1115 }
1116 return 0;
1117 }
1118
1119 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1120 struct v4l2_format *f)
1121 {
1122 struct vivid_dev *dev = video_drvdata(file);
1123 const struct v4l2_rect *compose = &dev->compose_cap;
1124 struct v4l2_window *win = &f->fmt.win;
1125 int i, j;
1126
1127 if (dev->multiplanar)
1128 return -ENOTTY;
1129
1130 win->w.left = clamp_t(int, win->w.left,
1131 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1132 win->w.top = clamp_t(int, win->w.top,
1133 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1134 win->w.width = compose->width;
1135 win->w.height = compose->height;
1136 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1137 win->field = V4L2_FIELD_ANY;
1138 win->chromakey = 0;
1139 win->global_alpha = 0;
1140 if (win->clipcount && !win->clips)
1141 win->clipcount = 0;
1142 if (win->clipcount > MAX_CLIPS)
1143 win->clipcount = MAX_CLIPS;
1144 if (win->clipcount) {
1145 if (copy_from_user(dev->try_clips_cap, win->clips,
1146 win->clipcount * sizeof(dev->clips_cap[0])))
1147 return -EFAULT;
1148 for (i = 0; i < win->clipcount; i++) {
1149 struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1150
1151 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1152 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1153 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1154 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1155 }
1156 /*
1157 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1158 * number and it's typically a one-time deal.
1159 */
1160 for (i = 0; i < win->clipcount - 1; i++) {
1161 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1162
1163 for (j = i + 1; j < win->clipcount; j++) {
1164 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1165
1166 if (v4l2_rect_overlap(r1, r2))
1167 return -EINVAL;
1168 }
1169 }
1170 if (copy_to_user(win->clips, dev->try_clips_cap,
1171 win->clipcount * sizeof(dev->clips_cap[0])))
1172 return -EFAULT;
1173 }
1174 return 0;
1175 }
1176
1177 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1178 struct v4l2_format *f)
1179 {
1180 struct vivid_dev *dev = video_drvdata(file);
1181 const struct v4l2_rect *compose = &dev->compose_cap;
1182 struct v4l2_window *win = &f->fmt.win;
1183 int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1184 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
1185 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1186 void *new_bitmap = NULL;
1187
1188 if (ret)
1189 return ret;
1190
1191 if (win->bitmap) {
1192 new_bitmap = vzalloc(bitmap_size);
1193
1194 if (new_bitmap == NULL)
1195 return -ENOMEM;
1196 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1197 vfree(new_bitmap);
1198 return -EFAULT;
1199 }
1200 }
1201
1202 dev->overlay_cap_top = win->w.top;
1203 dev->overlay_cap_left = win->w.left;
1204 dev->overlay_cap_field = win->field;
1205 vfree(dev->bitmap_cap);
1206 dev->bitmap_cap = new_bitmap;
1207 dev->clipcount_cap = win->clipcount;
1208 if (dev->clipcount_cap)
1209 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1210 return 0;
1211 }
1212
1213 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1214 {
1215 struct vivid_dev *dev = video_drvdata(file);
1216
1217 if (dev->multiplanar)
1218 return -ENOTTY;
1219
1220 if (i && dev->fb_vbase_cap == NULL)
1221 return -EINVAL;
1222
1223 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1224 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1225 return -EINVAL;
1226 }
1227
1228 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1229 return -EBUSY;
1230 dev->overlay_cap_owner = i ? fh : NULL;
1231 return 0;
1232 }
1233
1234 int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1235 struct v4l2_framebuffer *a)
1236 {
1237 struct vivid_dev *dev = video_drvdata(file);
1238
1239 if (dev->multiplanar)
1240 return -ENOTTY;
1241
1242 *a = dev->fb_cap;
1243 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1244 V4L2_FBUF_CAP_LIST_CLIPPING;
1245 a->flags = V4L2_FBUF_FLAG_PRIMARY;
1246 a->fmt.field = V4L2_FIELD_NONE;
1247 a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1248 a->fmt.priv = 0;
1249 return 0;
1250 }
1251
1252 int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1253 const struct v4l2_framebuffer *a)
1254 {
1255 struct vivid_dev *dev = video_drvdata(file);
1256 const struct vivid_fmt *fmt;
1257
1258 if (dev->multiplanar)
1259 return -ENOTTY;
1260
1261 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1262 return -EPERM;
1263
1264 if (dev->overlay_cap_owner)
1265 return -EBUSY;
1266
1267 if (a->base == NULL) {
1268 dev->fb_cap.base = NULL;
1269 dev->fb_vbase_cap = NULL;
1270 return 0;
1271 }
1272
1273 if (a->fmt.width < 48 || a->fmt.height < 32)
1274 return -EINVAL;
1275 fmt = vivid_get_format(dev, a->fmt.pixelformat);
1276 if (!fmt || !fmt->can_do_overlay)
1277 return -EINVAL;
1278 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
1279 return -EINVAL;
1280 if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1281 return -EINVAL;
1282
1283 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1284 dev->fb_cap = *a;
1285 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1286 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1287 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1288 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1289 return 0;
1290 }
1291
1292 static const struct v4l2_audio vivid_audio_inputs[] = {
1293 { 0, "TV", V4L2_AUDCAP_STEREO },
1294 { 1, "Line-In", V4L2_AUDCAP_STEREO },
1295 };
1296
1297 int vidioc_enum_input(struct file *file, void *priv,
1298 struct v4l2_input *inp)
1299 {
1300 struct vivid_dev *dev = video_drvdata(file);
1301
1302 if (inp->index >= dev->num_inputs)
1303 return -EINVAL;
1304
1305 inp->type = V4L2_INPUT_TYPE_CAMERA;
1306 switch (dev->input_type[inp->index]) {
1307 case WEBCAM:
1308 snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1309 dev->input_name_counter[inp->index]);
1310 inp->capabilities = 0;
1311 break;
1312 case TV:
1313 snprintf(inp->name, sizeof(inp->name), "TV %u",
1314 dev->input_name_counter[inp->index]);
1315 inp->type = V4L2_INPUT_TYPE_TUNER;
1316 inp->std = V4L2_STD_ALL;
1317 if (dev->has_audio_inputs)
1318 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1319 inp->capabilities = V4L2_IN_CAP_STD;
1320 break;
1321 case SVID:
1322 snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1323 dev->input_name_counter[inp->index]);
1324 inp->std = V4L2_STD_ALL;
1325 if (dev->has_audio_inputs)
1326 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1327 inp->capabilities = V4L2_IN_CAP_STD;
1328 break;
1329 case HDMI:
1330 snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1331 dev->input_name_counter[inp->index]);
1332 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1333 if (dev->edid_blocks == 0 ||
1334 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
1335 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1336 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
1337 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
1338 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1339 break;
1340 }
1341 if (dev->sensor_hflip)
1342 inp->status |= V4L2_IN_ST_HFLIP;
1343 if (dev->sensor_vflip)
1344 inp->status |= V4L2_IN_ST_VFLIP;
1345 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1346 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
1347 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1348 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
1349 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1350 } else if (vivid_is_tv_cap(dev)) {
1351 switch (tpg_g_quality(&dev->tpg)) {
1352 case TPG_QUAL_GRAY:
1353 inp->status |= V4L2_IN_ST_COLOR_KILL;
1354 break;
1355 case TPG_QUAL_NOISE:
1356 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1357 break;
1358 default:
1359 break;
1360 }
1361 }
1362 }
1363 return 0;
1364 }
1365
1366 int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1367 {
1368 struct vivid_dev *dev = video_drvdata(file);
1369
1370 *i = dev->input;
1371 return 0;
1372 }
1373
1374 int vidioc_s_input(struct file *file, void *priv, unsigned i)
1375 {
1376 struct vivid_dev *dev = video_drvdata(file);
1377 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
1378 unsigned brightness;
1379
1380 if (i >= dev->num_inputs)
1381 return -EINVAL;
1382
1383 if (i == dev->input)
1384 return 0;
1385
1386 if (vb2_is_busy(&dev->vb_vid_cap_q) ||
1387 vb2_is_busy(&dev->vb_vbi_cap_q) ||
1388 vb2_is_busy(&dev->vb_meta_cap_q))
1389 return -EBUSY;
1390
1391 dev->input = i;
1392 dev->vid_cap_dev.tvnorms = 0;
1393 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1394 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1395 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1396 }
1397 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1398 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1399 vivid_update_format_cap(dev, false);
1400
1401 if (dev->colorspace) {
1402 switch (dev->input_type[i]) {
1403 case WEBCAM:
1404 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1405 break;
1406 case TV:
1407 case SVID:
1408 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1409 break;
1410 case HDMI:
1411 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
1412 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1413 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1414 else
1415 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
1416 } else {
1417 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1418 }
1419 break;
1420 }
1421 }
1422
1423 /*
1424 * Modify the brightness range depending on the input.
1425 * This makes it easy to use vivid to test whether applications can
1426 * handle control range modifications. It also mirrors how this is
1427 * typically used in practice, since different inputs may be hooked
1428 * up to different receivers with different control ranges.
1429 */
1430 brightness = 128 * i + dev->input_brightness[i];
1431 v4l2_ctrl_modify_range(dev->brightness,
1432 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1433 v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1434
1435 /* Restore per-input states. */
1436 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
1437 vivid_is_hdmi_cap(dev));
1438 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
1439 dev->dv_timings_signal_mode[dev->input] ==
1440 SELECTED_DV_TIMINGS);
1441 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
1442 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
1443 dev->std_signal_mode[dev->input]);
1444
1445 if (vivid_is_hdmi_cap(dev)) {
1446 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
1447 dev->dv_timings_signal_mode[dev->input]);
1448 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
1449 dev->query_dv_timings[dev->input]);
1450 } else if (vivid_is_sdtv_cap(dev)) {
1451 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
1452 dev->std_signal_mode[dev->input]);
1453 v4l2_ctrl_s_ctrl(dev->ctrl_standard,
1454 dev->std_signal_mode[dev->input]);
1455 }
1456
1457 return 0;
1458 }
1459
1460 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1461 {
1462 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1463 return -EINVAL;
1464 *vin = vivid_audio_inputs[vin->index];
1465 return 0;
1466 }
1467
1468 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1469 {
1470 struct vivid_dev *dev = video_drvdata(file);
1471
1472 if (!vivid_is_sdtv_cap(dev))
1473 return -EINVAL;
1474 *vin = vivid_audio_inputs[dev->tv_audio_input];
1475 return 0;
1476 }
1477
1478 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1479 {
1480 struct vivid_dev *dev = video_drvdata(file);
1481
1482 if (!vivid_is_sdtv_cap(dev))
1483 return -EINVAL;
1484 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1485 return -EINVAL;
1486 dev->tv_audio_input = vin->index;
1487 return 0;
1488 }
1489
1490 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1491 {
1492 struct vivid_dev *dev = video_drvdata(file);
1493
1494 if (vf->tuner != 0)
1495 return -EINVAL;
1496 vf->frequency = dev->tv_freq;
1497 return 0;
1498 }
1499
1500 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1501 {
1502 struct vivid_dev *dev = video_drvdata(file);
1503
1504 if (vf->tuner != 0)
1505 return -EINVAL;
1506 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1507 if (vivid_is_tv_cap(dev))
1508 vivid_update_quality(dev);
1509 return 0;
1510 }
1511
1512 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1513 {
1514 struct vivid_dev *dev = video_drvdata(file);
1515
1516 if (vt->index != 0)
1517 return -EINVAL;
1518 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1519 return -EINVAL;
1520 dev->tv_audmode = vt->audmode;
1521 return 0;
1522 }
1523
1524 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1525 {
1526 struct vivid_dev *dev = video_drvdata(file);
1527 enum tpg_quality qual;
1528
1529 if (vt->index != 0)
1530 return -EINVAL;
1531
1532 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1533 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1534 vt->audmode = dev->tv_audmode;
1535 vt->rangelow = MIN_TV_FREQ;
1536 vt->rangehigh = MAX_TV_FREQ;
1537 qual = vivid_get_quality(dev, &vt->afc);
1538 if (qual == TPG_QUAL_COLOR)
1539 vt->signal = 0xffff;
1540 else if (qual == TPG_QUAL_GRAY)
1541 vt->signal = 0x8000;
1542 else
1543 vt->signal = 0;
1544 if (qual == TPG_QUAL_NOISE) {
1545 vt->rxsubchans = 0;
1546 } else if (qual == TPG_QUAL_GRAY) {
1547 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1548 } else {
1549 unsigned int channel_nr = dev->tv_freq / (6 * 16);
1550 unsigned int options =
1551 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;
1552
1553 switch (channel_nr % options) {
1554 case 0:
1555 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1556 break;
1557 case 1:
1558 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1559 break;
1560 case 2:
1561 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
1562 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1563 else
1564 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1565 break;
1566 case 3:
1567 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1568 break;
1569 }
1570 }
1571 strscpy(vt->name, "TV Tuner", sizeof(vt->name));
1572 return 0;
1573 }
1574
1575 /* Must remain in sync with the vivid_ctrl_standard_strings array */
1576 const v4l2_std_id vivid_standard[] = {
1577 V4L2_STD_NTSC_M,
1578 V4L2_STD_NTSC_M_JP,
1579 V4L2_STD_NTSC_M_KR,
1580 V4L2_STD_NTSC_443,
1581 V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1582 V4L2_STD_PAL_I,
1583 V4L2_STD_PAL_DK,
1584 V4L2_STD_PAL_M,
1585 V4L2_STD_PAL_N,
1586 V4L2_STD_PAL_Nc,
1587 V4L2_STD_PAL_60,
1588 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1589 V4L2_STD_SECAM_DK,
1590 V4L2_STD_SECAM_L,
1591 V4L2_STD_SECAM_LC,
1592 V4L2_STD_UNKNOWN
1593 };
1594
1595 /* Must remain in sync with the vivid_standard array */
1596 const char * const vivid_ctrl_standard_strings[] = {
1597 "NTSC-M",
1598 "NTSC-M-JP",
1599 "NTSC-M-KR",
1600 "NTSC-443",
1601 "PAL-BGH",
1602 "PAL-I",
1603 "PAL-DK",
1604 "PAL-M",
1605 "PAL-N",
1606 "PAL-Nc",
1607 "PAL-60",
1608 "SECAM-BGH",
1609 "SECAM-DK",
1610 "SECAM-L",
1611 "SECAM-Lc",
1612 NULL,
1613 };
1614
1615 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1616 {
1617 struct vivid_dev *dev = video_drvdata(file);
1618 unsigned int last = dev->query_std_last[dev->input];
1619
1620 if (!vivid_is_sdtv_cap(dev))
1621 return -ENODATA;
1622 if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
1623 dev->std_signal_mode[dev->input] == NO_LOCK) {
1624 *id = V4L2_STD_UNKNOWN;
1625 return 0;
1626 }
1627 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1628 *id = V4L2_STD_UNKNOWN;
1629 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
1630 *id = dev->std_cap[dev->input];
1631 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
1632 *id = dev->query_std[dev->input];
1633 } else {
1634 *id = vivid_standard[last];
1635 dev->query_std_last[dev->input] =
1636 (last + 1) % ARRAY_SIZE(vivid_standard);
1637 }
1638
1639 return 0;
1640 }
1641
1642 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1643 {
1644 struct vivid_dev *dev = video_drvdata(file);
1645
1646 if (!vivid_is_sdtv_cap(dev))
1647 return -ENODATA;
1648 if (dev->std_cap[dev->input] == id)
1649 return 0;
1650 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1651 return -EBUSY;
1652 dev->std_cap[dev->input] = id;
1653 vivid_update_format_cap(dev, false);
1654 return 0;
1655 }
1656
1657 static void find_aspect_ratio(u32 width, u32 height,
1658 u32 *num, u32 *denom)
1659 {
1660 if (!(height % 3) && ((height * 4 / 3) == width)) {
1661 *num = 4;
1662 *denom = 3;
1663 } else if (!(height % 9) && ((height * 16 / 9) == width)) {
1664 *num = 16;
1665 *denom = 9;
1666 } else if (!(height % 10) && ((height * 16 / 10) == width)) {
1667 *num = 16;
1668 *denom = 10;
1669 } else if (!(height % 4) && ((height * 5 / 4) == width)) {
1670 *num = 5;
1671 *denom = 4;
1672 } else if (!(height % 9) && ((height * 15 / 9) == width)) {
1673 *num = 15;
1674 *denom = 9;
1675 } else { /* default to 16:9 */
1676 *num = 16;
1677 *denom = 9;
1678 }
1679 }
1680
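/*
 * Check whether the requested timings can be generated by the CVT or GTF
 * formulas (within the vivid timing capabilities). Used by
 * vivid_vid_cap_s_dv_timings() to accept timings that are not in the
 * V4L2 preset list.
 */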
1681 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
1682 {
1683 struct v4l2_bt_timings *bt = &timings->bt;
1684 u32 total_h_pixel;
1685 u32 total_v_lines;
1686 u32 h_freq;
1687
1688 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
1689 NULL, NULL))
1690 return false;
1691
1692 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
1693 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
1694
1695 h_freq = (u32)bt->pixelclock / total_h_pixel;
1696
1697 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
1698 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
1699 bt->polarities, bt->interlaced, timings))
1700 return true;
1701 }
1702
1703 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
1704 struct v4l2_fract aspect_ratio;
1705
1706 find_aspect_ratio(bt->width, bt->height,
1707 &aspect_ratio.numerator,
1708 &aspect_ratio.denominator);
1709 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
1710 bt->polarities, bt->interlaced,
1711 aspect_ratio, timings))
1712 return true;
1713 }
1714 return false;
1715 }
1716
1717 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1718 struct v4l2_dv_timings *timings)
1719 {
1720 struct vivid_dev *dev = video_drvdata(file);
1721
1722 if (!vivid_is_hdmi_cap(dev))
1723 return -ENODATA;
1724 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
1725 0, NULL, NULL) &&
1726 !valid_cvt_gtf_timings(timings))
1727 return -EINVAL;
1728
1729 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
1730 0, false))
1731 return 0;
1732 if (vb2_is_busy(&dev->vb_vid_cap_q))
1733 return -EBUSY;
1734
1735 dev->dv_timings_cap[dev->input] = *timings;
1736 vivid_update_format_cap(dev, false);
1737 return 0;
1738 }
1739
1740 int vidioc_query_dv_timings(struct file *file, void *_fh,
1741 struct v4l2_dv_timings *timings)
1742 {
1743 struct vivid_dev *dev = video_drvdata(file);
1744 unsigned int input = dev->input;
1745 unsigned int last = dev->query_dv_timings_last[input];
1746
1747 if (!vivid_is_hdmi_cap(dev))
1748 return -ENODATA;
1749 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
1750 dev->edid_blocks == 0)
1751 return -ENOLINK;
1752 if (dev->dv_timings_signal_mode[input] == NO_LOCK)
1753 return -ENOLCK;
1754 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
1755 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1756 return -ERANGE;
1757 }
1758 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
1759 *timings = dev->dv_timings_cap[input];
1760 } else if (dev->dv_timings_signal_mode[input] ==
1761 SELECTED_DV_TIMINGS) {
1762 *timings =
1763 v4l2_dv_timings_presets[dev->query_dv_timings[input]];
1764 } else {
1765 *timings =
1766 v4l2_dv_timings_presets[last];
1767 dev->query_dv_timings_last[input] =
1768 (last + 1) % dev->query_dv_timings_size;
1769 }
1770 return 0;
1771 }
1772
1773 int vidioc_s_edid(struct file *file, void *_fh,
1774 struct v4l2_edid *edid)
1775 {
1776 struct vivid_dev *dev = video_drvdata(file);
1777 u16 phys_addr;
1778 u32 display_present = 0;
1779 unsigned int i, j;
1780 int ret;
1781
1782 memset(edid->reserved, 0, sizeof(edid->reserved));
1783 if (edid->pad >= dev->num_inputs)
1784 return -EINVAL;
1785 if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1786 return -EINVAL;
1787 if (edid->blocks == 0) {
1788 dev->edid_blocks = 0;
1789 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
1790 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
1791 phys_addr = CEC_PHYS_ADDR_INVALID;
1792 goto set_phys_addr;
1793 }
1794 if (edid->blocks > dev->edid_max_blocks) {
1795 edid->blocks = dev->edid_max_blocks;
1796 return -E2BIG;
1797 }
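/*
 * Extract the CEC physical address from the EDID and validate it before
 * the new EDID is committed below.
 */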
1798 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
1799 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
1800 if (ret)
1801 return ret;
1802
1803 if (vb2_is_busy(&dev->vb_vid_cap_q))
1804 return -EBUSY;
1805
1806 dev->edid_blocks = edid->blocks;
1807 memcpy(dev->edid, edid->edid, edid->blocks * 128);
1808
1809 for (i = 0, j = 0; i < dev->num_outputs; i++)
1810 if (dev->output_type[i] == HDMI)
1811 display_present |=
1812 dev->display_present[i] << j++;
1813
1814 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
1815 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
1816
1817 set_phys_addr:
1818 /* TODO: a proper hotplug detect cycle should be emulated here */
1819 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
1820
1821 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
1822 cec_s_phys_addr(dev->cec_tx_adap[i],
1823 dev->display_present[i] ?
1824 v4l2_phys_addr_for_input(phys_addr, i + 1) :
1825 CEC_PHYS_ADDR_INVALID,
1826 false);
1827 return 0;
1828 }
1829
1830 int vidioc_enum_framesizes(struct file *file, void *fh,
1831 struct v4l2_frmsizeenum *fsize)
1832 {
1833 struct vivid_dev *dev = video_drvdata(file);
1834
1835 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1836 return -EINVAL;
1837 if (vivid_get_format(dev, fsize->pixel_format) == NULL)
1838 return -EINVAL;
1839 if (vivid_is_webcam(dev)) {
1840 if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1841 return -EINVAL;
1842 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1843 fsize->discrete = webcam_sizes[fsize->index];
1844 return 0;
1845 }
1846 if (fsize->index)
1847 return -EINVAL;
1848 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1849 fsize->stepwise.min_width = MIN_WIDTH;
1850 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1851 fsize->stepwise.step_width = 2;
1852 fsize->stepwise.min_height = MIN_HEIGHT;
1853 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1854 fsize->stepwise.step_height = 2;
1855 return 0;
1856 }
1857
1858 /* timeperframe is arbitrary and continuous */
1859 int vidioc_enum_frameintervals(struct file *file, void *priv,
1860 struct v4l2_frmivalenum *fival)
1861 {
1862 struct vivid_dev *dev = video_drvdata(file);
1863 const struct vivid_fmt *fmt;
1864 int i;
1865
1866 fmt = vivid_get_format(dev, fival->pixel_format);
1867 if (!fmt)
1868 return -EINVAL;
1869
1870 if (!vivid_is_webcam(dev)) {
1871 if (fival->index)
1872 return -EINVAL;
1873 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1874 return -EINVAL;
1875 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1876 return -EINVAL;
1877 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1878 fival->discrete = dev->timeperframe_vid_cap;
1879 return 0;
1880 }
1881
1882 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1883 if (fival->width == webcam_sizes[i].width &&
1884 fival->height == webcam_sizes[i].height)
1885 break;
1886 if (i == ARRAY_SIZE(webcam_sizes))
1887 return -EINVAL;
1888 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
1889 return -EINVAL;
1890 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1891 fival->discrete = webcam_intervals[fival->index];
1892 return 0;
1893 }
1894
1895 int vivid_vid_cap_g_parm(struct file *file, void *priv,
1896 struct v4l2_streamparm *parm)
1897 {
1898 struct vivid_dev *dev = video_drvdata(file);
1899
1900 if (parm->type != (dev->multiplanar ?
1901 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1902 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1903 return -EINVAL;
1904
1905 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1906 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1907 parm->parm.capture.readbuffers = 1;
1908 return 0;
1909 }
1910
1911 int vivid_vid_cap_s_parm(struct file *file, void *priv,
1912 struct v4l2_streamparm *parm)
1913 {
1914 struct vivid_dev *dev = video_drvdata(file);
1915 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
1916 struct v4l2_fract tpf;
1917 unsigned i;
1918
1919 if (parm->type != (dev->multiplanar ?
1920 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1921 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1922 return -EINVAL;
1923 if (!vivid_is_webcam(dev))
1924 return vivid_vid_cap_g_parm(file, priv, parm);
1925
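/*
 * Map the requested frame interval onto the discrete webcam intervals:
 * a zero interval selects the fastest supported rate, otherwise the
 * longest supported interval that does not exceed the request is used,
 * falling back to the shortest supported interval.
 */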
1926 tpf = parm->parm.capture.timeperframe;
1927
1928 if (tpf.denominator == 0)
1929 tpf = webcam_intervals[ival_sz - 1];
1930 for (i = 0; i < ival_sz; i++)
1931 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
1932 break;
1933 if (i == ival_sz)
1934 i = ival_sz - 1;
1935 dev->webcam_ival_idx = i;
1936 tpf = webcam_intervals[dev->webcam_ival_idx];
1937
1938 /* resync the thread's timings */
1939 dev->cap_seq_resync = true;
1940 dev->timeperframe_vid_cap = tpf;
1941 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1942 parm->parm.capture.timeperframe = tpf;
1943 parm->parm.capture.readbuffers = 1;
1944 return 0;
1945 }
1946