1 /*
2 * Copyright (C) 2012 Texas Instruments Inc
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Contributors:
18 * Manjunath Hadli <manjunath.hadli@ti.com>
19 * Prabhakar Lad <prabhakar.lad@ti.com>
20 */
21
22 #include <linux/module.h>
23 #include <linux/slab.h>
24
25 #include <media/v4l2-ioctl.h>
26
27 #include "vpfe.h"
28 #include "vpfe_mc_capture.h"
29
30 static int debug;
31
32 /* get the media entity of the external subdev which is active */
33 static struct media_entity *vpfe_get_input_entity
34 (struct vpfe_video_device *video)
35 {
36 struct vpfe_device *vpfe_dev = video->vpfe_dev;
37 struct media_pad *remote;
38
39 remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
40 if (!remote) {
41 pr_err("Invalid media connection to isif/ccdc\n");
42 return NULL;
43 }
44 return remote->entity;
45 }
46
47 /* updates external subdev(sensor/decoder) which is active */
48 static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
49 {
50 struct vpfe_device *vpfe_dev = video->vpfe_dev;
51 struct vpfe_config *vpfe_cfg;
52 struct v4l2_subdev *subdev;
53 struct media_pad *remote;
54 int i;
55
56 remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
57 if (!remote) {
58 pr_err("Invalid media connection to isif/ccdc\n");
59 return -EINVAL;
60 }
61
62 subdev = media_entity_to_v4l2_subdev(remote->entity);
63 vpfe_cfg = vpfe_dev->pdev->platform_data;
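/* look up the connected entity in the platform data subdev list by module name */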
64 for (i = 0; i < vpfe_cfg->num_subdevs; i++) {
65 if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) {
66 video->current_ext_subdev = &vpfe_cfg->sub_devs[i];
67 break;
68 }
69 }
70
71 /* if the user has not linked a decoder/sensor to the isif/ccdc */
72 if (i == vpfe_cfg->num_subdevs) {
73 pr_err("Invalid media chain connection to isif/ccdc\n");
74 return -EINVAL;
75 }
76 /* find the v4l2 subdev pointer */
77 for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) {
78 if (!strcmp(video->current_ext_subdev->module_name,
79 vpfe_dev->sd[i]->name))
80 video->current_ext_subdev->subdev = vpfe_dev->sd[i];
81 }
82 return 0;
83 }
84
85 /* get the subdev which is connected to the output video node */
86 static struct v4l2_subdev *
87 vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
88 {
89 struct media_pad *remote = media_entity_remote_pad(&video->pad);
90
91 if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
92 return NULL;
93 if (pad)
94 *pad = remote->index;
95 return media_entity_to_v4l2_subdev(remote->entity);
96 }
97
98 /* get the format set at output pad of the adjacent subdev */
99 static int
100 __vpfe_video_get_format(struct vpfe_video_device *video,
101 struct v4l2_format *format)
102 {
103 struct v4l2_subdev_format fmt;
104 struct v4l2_subdev *subdev;
105 struct media_pad *remote;
106 u32 pad;
107 int ret;
108
109 subdev = vpfe_video_remote_subdev(video, &pad);
110 if (!subdev)
111 return -EINVAL;
112
113 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
114 remote = media_entity_remote_pad(&video->pad);
115 fmt.pad = remote->index;
116
117 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
118 if (ret == -ENOIOCTLCMD)
119 return -EINVAL;
120
121 format->type = video->type;
122 /* convert mbus_format to v4l2_format */
123 v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
124 mbus_to_pix(&fmt.format, &format->fmt.pix);
125
126 return 0;
127 }
128
129 /* make a note of pipeline details */
130 static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
131 {
132 struct media_graph graph;
133 struct media_entity *entity = &video->video_dev.entity;
134 struct media_device *mdev = entity->graph_obj.mdev;
135 struct vpfe_pipeline *pipe = &video->pipe;
136 struct vpfe_video_device *far_end = NULL;
137 int ret;
138
139 pipe->input_num = 0;
140 pipe->output_num = 0;
141
142 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
143 pipe->inputs[pipe->input_num++] = video;
144 else
145 pipe->outputs[pipe->output_num++] = video;
146
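/*
 * Walk the media graph from this video node and record every other
 * video node it reaches as a pipeline input or output.
 */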
147 mutex_lock(&mdev->graph_mutex);
148 ret = media_graph_walk_init(&graph, mdev);
149 if (ret) {
150 mutex_unlock(&mdev->graph_mutex);
151 return -ENOMEM;
152 }
153 media_graph_walk_start(&graph, entity);
154 while ((entity = media_graph_walk_next(&graph))) {
155 if (entity == &video->video_dev.entity)
156 continue;
157 if (!is_media_entity_v4l2_video_device(entity))
158 continue;
159 far_end = to_vpfe_video(media_entity_to_video_device(entity));
160 if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
161 pipe->inputs[pipe->input_num++] = far_end;
162 else
163 pipe->outputs[pipe->output_num++] = far_end;
164 }
165 media_graph_walk_cleanup(&graph);
166 mutex_unlock(&mdev->graph_mutex);
167
168 return 0;
169 }
170
171 /* update pipe state selected by user */
172 static int vpfe_update_pipe_state(struct vpfe_video_device *video)
173 {
174 struct vpfe_pipeline *pipe = &video->pipe;
175 int ret;
176
177 ret = vpfe_prepare_pipeline(video);
178 if (ret)
179 return ret;
180
181 /*
182 * Find out whether the pipeline has an input video node.
183 * If it does, this is a single-shot pipeline; otherwise it is continuous.
184 */
185 if (pipe->input_num == 0) {
186 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
187 ret = vpfe_update_current_ext_subdev(video);
188 if (ret) {
189 pr_err("Invalid external subdev\n");
190 return ret;
191 }
192 } else {
193 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
194 }
195 video->initialized = 1;
196 video->skip_frame_count = 1;
197 video->skip_frame_count_init = 1;
198 return 0;
199 }
200
201 /* checks whether pipeline is ready for enabling */
202 int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
203 {
204 int i;
205
206 for (i = 0; i < pipe->input_num; i++)
207 if (!pipe->inputs[i]->started ||
208 pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
209 return 0;
210 for (i = 0; i < pipe->output_num; i++)
211 if (!pipe->outputs[i]->started ||
212 pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
213 return 0;
214 return 1;
215 }
216
217 /*
218 * Validate a pipeline by checking both ends of all links for format
219 * discrepancies.
220 *
221 * Return 0 if all formats match, or -EPIPE if at least one link is found with
222 * different formats on its two ends.
223 */
224 static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
225 {
226 struct v4l2_subdev_format fmt_source;
227 struct v4l2_subdev_format fmt_sink;
228 struct v4l2_subdev *subdev;
229 struct media_pad *pad;
230 int ret;
231
232 /*
233 * It should not matter whether this is outputs[0] or
234 * outputs[1], as the general idea is to traverse backwards
235 * and the output video node always has the format of the
236 * connected pad.
237 */
238 subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
239 if (!subdev)
240 return -EPIPE;
241
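/*
 * Walk the pipeline upstream one link at a time, comparing the sink
 * pad format of each subdev with the source pad format of the subdev
 * feeding it.
 */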
242 while (1) {
243 /* Retrieve the sink format */
244 pad = &subdev->entity.pads[0];
245 if (!(pad->flags & MEDIA_PAD_FL_SINK))
246 break;
247
248 fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
249 fmt_sink.pad = pad->index;
250 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL,
251 &fmt_sink);
252
253 if (ret < 0 && ret != -ENOIOCTLCMD)
254 return -EPIPE;
255
256 /* Retrieve the source format */
257 pad = media_entity_remote_pad(pad);
258 if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
259 break;
260
261 subdev = media_entity_to_v4l2_subdev(pad->entity);
262
263 fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
264 fmt_source.pad = pad->index;
265 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
266 if (ret < 0 && ret != -ENOIOCTLCMD)
267 return -EPIPE;
268
269 /* Check if the two ends match */
270 if (fmt_source.format.code != fmt_sink.format.code ||
271 fmt_source.format.width != fmt_sink.format.width ||
272 fmt_source.format.height != fmt_sink.format.height)
273 return -EPIPE;
274 }
275 return 0;
276 }
277
278 /*
279 * vpfe_pipeline_enable() - Enable streaming on a pipeline
280 * @vpfe_dev: vpfe device
281 * @pipe: vpfe pipeline
282 *
283 * Walk the entities chain starting at the pipeline output video node and start
284 * all modules in the chain in the given mode.
285 *
286 * Return 0 if successful, or the return value of the failed video::s_stream
287 * operation otherwise.
288 */
289 static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
290 {
291 struct media_entity *entity;
292 struct v4l2_subdev *subdev;
293 struct media_device *mdev;
294 int ret;
295
296 if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
297 entity = vpfe_get_input_entity(pipe->outputs[0]);
298 else
299 entity = &pipe->inputs[0]->video_dev.entity;
300
301 mdev = entity->graph_obj.mdev;
302 mutex_lock(&mdev->graph_mutex);
303 ret = media_graph_walk_init(&pipe->graph, mdev);
304 if (ret)
305 goto out;
306 media_graph_walk_start(&pipe->graph, entity);
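/* start every V4L2 subdev in the chain; non-subdev entities are skipped */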
307 while ((entity = media_graph_walk_next(&pipe->graph))) {
308
309 if (!is_media_entity_v4l2_subdev(entity))
310 continue;
311 subdev = media_entity_to_v4l2_subdev(entity);
312 ret = v4l2_subdev_call(subdev, video, s_stream, 1);
313 if (ret < 0 && ret != -ENOIOCTLCMD)
314 break;
315 }
316 out:
317 if (ret)
318 media_graph_walk_cleanup(&pipe->graph);
319 mutex_unlock(&mdev->graph_mutex);
320 return ret;
321 }
322
323 /*
324 * vpfe_pipeline_disable() - Disable streaming on a pipeline
325 * @vpfe_dev: vpfe device
326 * @pipe: VPFE pipeline
327 *
328 * Walk the entities chain starting at the pipeline output video node and stop
329 * all modules in the chain.
330 *
331 * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
332 * can't be stopped.
333 */
334 static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
335 {
336 struct media_entity *entity;
337 struct v4l2_subdev *subdev;
338 struct media_device *mdev;
339 int ret = 0;
340
341 if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
342 entity = vpfe_get_input_entity(pipe->outputs[0]);
343 else
344 entity = &pipe->inputs[0]->video_dev.entity;
345
346 mdev = entity->graph_obj.mdev;
347 mutex_lock(&mdev->graph_mutex);
348 media_graph_walk_start(&pipe->graph, entity);
349
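/* stop every V4L2 subdev in the chain; non-subdev entities are skipped */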
350 while ((entity = media_graph_walk_next(&pipe->graph))) {
351
352 if (!is_media_entity_v4l2_subdev(entity))
353 continue;
354 subdev = media_entity_to_v4l2_subdev(entity);
355 ret = v4l2_subdev_call(subdev, video, s_stream, 0);
356 if (ret < 0 && ret != -ENOIOCTLCMD)
357 break;
358 }
359 mutex_unlock(&mdev->graph_mutex);
360
361 media_graph_walk_cleanup(&pipe->graph);
362 return ret ? -ETIMEDOUT : 0;
363 }
364
365 /*
366 * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline
367 * @vpfe_dev: VPFE device
368 * @pipe: VPFE pipeline
369 * @state: Stream state (stopped or active)
370 *
371 * Set the pipeline to the given stream state.
372 *
373 * Return 0 if successful, or the return value of the failed video::s_stream
374 * operation otherwise.
375 */
376 static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe,
377 enum vpfe_pipeline_stream_state state)
378 {
379 if (state == VPFE_PIPELINE_STREAM_STOPPED)
380 return vpfe_pipeline_disable(pipe);
381
382 return vpfe_pipeline_enable(pipe);
383 }
384
385 static int all_videos_stopped(struct vpfe_video_device *video)
386 {
387 struct vpfe_pipeline *pipe = &video->pipe;
388 int i;
389
390 for (i = 0; i < pipe->input_num; i++)
391 if (pipe->inputs[i]->started)
392 return 0;
393 for (i = 0; i < pipe->output_num; i++)
394 if (pipe->outputs[i]->started)
395 return 0;
396 return 1;
397 }
398
399 /*
400 * vpfe_open() - open video device
401 * @file: file pointer
402 *
403 * initialize media pipeline state, allocate memory for file handle
404 *
405 * Return 0 if successful, or a negative error code (-ENOMEM, -ENODEV) otherwise.
406 */
407 static int vpfe_open(struct file *file)
408 {
409 struct vpfe_video_device *video = video_drvdata(file);
410 struct vpfe_fh *handle;
411
412 /* Allocate memory for the file handle object */
413 handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
414
415 if (!handle)
416 return -ENOMEM;
417
418 v4l2_fh_init(&handle->vfh, &video->video_dev);
419 v4l2_fh_add(&handle->vfh);
420
421 mutex_lock(&video->lock);
422 /* If the decoder is not initialized, initialize it */
423 if (!video->initialized && vpfe_update_pipe_state(video)) {
424 mutex_unlock(&video->lock);
425 v4l2_fh_del(&handle->vfh);
426 v4l2_fh_exit(&handle->vfh);
427 kfree(handle);
428 return -ENODEV;
429 }
430 /* Increment device users counter */
431 video->usrs++;
432 /* Set io_allowed member to false */
433 handle->io_allowed = 0;
434 handle->video = video;
435 file->private_data = &handle->vfh;
436 mutex_unlock(&video->lock);
437
438 return 0;
439 }
440
441 /* get the next buffer available from dma queue */
442 static unsigned long
443 vpfe_video_get_next_buffer(struct vpfe_video_device *video)
444 {
445 video->cur_frm = video->next_frm =
446 list_entry(video->dma_queue.next,
447 struct vpfe_cap_buffer, list);
448
449 list_del(&video->next_frm->list);
450 video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
451 return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
452 }
453
454 /* schedule the next buffer which is available on dma queue */
455 void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
456 {
457 struct vpfe_device *vpfe_dev = video->vpfe_dev;
458 unsigned long addr;
459
460 if (list_empty(&video->dma_queue))
461 return;
462
463 video->next_frm = list_entry(video->dma_queue.next,
464 struct vpfe_cap_buffer, list);
465
466 if (video->pipe.state == VPFE_PIPELINE_STREAM_SINGLESHOT)
467 video->cur_frm = video->next_frm;
468
469 list_del(&video->next_frm->list);
470 video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
471 addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
472 video->ops->queue(vpfe_dev, addr);
473 video->state = VPFE_VIDEO_BUFFER_QUEUED;
474 }
475
476 /* schedule the buffer for capturing bottom field */
477 void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
478 {
479 struct vpfe_device *vpfe_dev = video->vpfe_dev;
480 unsigned long addr;
481
482 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
483 addr += video->field_off;
484 video->ops->queue(vpfe_dev, addr);
485 }
486
487 /* make buffer available for dequeue */
488 void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
489 {
490 struct vpfe_pipeline *pipe = &video->pipe;
491
492 video->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
493 vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
494 if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
495 video->cur_frm = video->next_frm;
496 }
497
498 /* vpfe_stop_capture() - stop streaming */
499 static void vpfe_stop_capture(struct vpfe_video_device *video)
500 {
501 struct vpfe_pipeline *pipe = &video->pipe;
502
503 video->started = 0;
504
505 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
506 return;
507 if (all_videos_stopped(video))
508 vpfe_pipeline_set_stream(pipe,
509 VPFE_PIPELINE_STREAM_STOPPED);
510 }
511
512 /*
513 * vpfe_release() - release video device
514 * @file: file pointer
515 *
516 * deletes buffer queue, frees the buffers and the vpfe file handle
517 *
518 * Return 0
519 */
520 static int vpfe_release(struct file *file)
521 {
522 struct vpfe_video_device *video = video_drvdata(file);
523 struct v4l2_fh *vfh = file->private_data;
524 struct vpfe_device *vpfe_dev = video->vpfe_dev;
525 struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh);
526
527 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
528
529 /* Get the device lock */
530 mutex_lock(&video->lock);
531 /* if this instance is doing IO */
532 if (fh->io_allowed) {
533 if (video->started) {
534 vpfe_stop_capture(video);
535 /*
536 * mark pipe state as stopped in vpfe_release(),
537 * as app might call streamon() after streamoff()
538 * in which case driver has to start streaming.
539 */
540 video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
541 vb2_streamoff(&video->buffer_queue,
542 video->buffer_queue.type);
543 }
544 video->io_usrs = 0;
545 /* Free buffers allocated */
546 vb2_queue_release(&video->buffer_queue);
547 }
548 /* Decrement device users counter */
549 video->usrs--;
550 v4l2_fh_del(&fh->vfh);
551 v4l2_fh_exit(&fh->vfh);
552 /* If this is the last file handle */
553 if (!video->usrs)
554 video->initialized = 0;
555 mutex_unlock(&video->lock);
556 file->private_data = NULL;
557 /* Free memory allocated to file handle object */
558 kzfree(fh);
560 return 0;
561 }
562
563 /*
564 * vpfe_mmap() - map kernel space buffers into
565 * user space
566 */
567 static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
568 {
569 struct vpfe_video_device *video = video_drvdata(file);
570 struct vpfe_device *vpfe_dev = video->vpfe_dev;
571
572 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
573 return vb2_mmap(&video->buffer_queue, vma);
574 }
575
576 /*
577 * vpfe_poll() - It is used for select/poll system call
578 */
579 static __poll_t vpfe_poll(struct file *file, poll_table *wait)
580 {
581 struct vpfe_video_device *video = video_drvdata(file);
582 struct vpfe_device *vpfe_dev = video->vpfe_dev;
583
584 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
585 if (video->started)
586 return vb2_poll(&video->buffer_queue, file, wait);
587 return 0;
588 }
589
590 /* vpfe capture driver file operations */
591 static const struct v4l2_file_operations vpfe_fops = {
592 .owner = THIS_MODULE,
593 .open = vpfe_open,
594 .release = vpfe_release,
595 .unlocked_ioctl = video_ioctl2,
596 .mmap = vpfe_mmap,
597 .poll = vpfe_poll
598 };
599
600 /*
601 * vpfe_querycap() - query capabilities of video device
602 * @file: file pointer
603 * @priv: void pointer
604 * @cap: pointer to v4l2_capability structure
605 *
606 * fills v4l2 capabilities structure
607 *
608 * Return 0
609 */
610 static int vpfe_querycap(struct file *file, void *priv,
611 struct v4l2_capability *cap)
612 {
613 struct vpfe_video_device *video = video_drvdata(file);
614 struct vpfe_device *vpfe_dev = video->vpfe_dev;
615
616 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
617
618 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
619 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
620 else
621 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
622 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
623 V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
624 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
625 strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
626 strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
627
628 return 0;
629 }
630
631 /*
632 * vpfe_g_fmt() - get the format which is active on video device
633 * @file: file pointer
634 * @priv: void pointer
635 * @fmt: pointer to v4l2_format structure
636 *
637 * fills v4l2 format structure with active format
638 *
639 * Return 0
640 */
641 static int vpfe_g_fmt(struct file *file, void *priv,
642 struct v4l2_format *fmt)
643 {
644 struct vpfe_video_device *video = video_drvdata(file);
645 struct vpfe_device *vpfe_dev = video->vpfe_dev;
646
647 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
648 /* Fill in the information about format */
649 *fmt = video->fmt;
650 return 0;
651 }
652
653 /*
654 * vpfe_enum_fmt() - enum formats supported on media chain
655 * @file: file pointer
656 * @priv: void pointer
657 * @fmt: pointer to v4l2_fmtdesc structure
658 *
659 * fills v4l2_fmtdesc structure with output format set on adjacent subdev,
660 * only one format is enumerated as the subdevs are already configured
661 *
662 * Return 0 if successful, error code otherwise
663 */
664 static int vpfe_enum_fmt(struct file *file, void *priv,
665 struct v4l2_fmtdesc *fmt)
666 {
667 struct vpfe_video_device *video = video_drvdata(file);
668 struct vpfe_device *vpfe_dev = video->vpfe_dev;
669 struct v4l2_subdev_format sd_fmt;
670 struct v4l2_mbus_framefmt mbus;
671 struct v4l2_subdev *subdev;
672 struct v4l2_format format;
673 struct media_pad *remote;
674 int ret;
675
676 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
677
678 /*
679 * since the subdev pad format is already set,
680 * only one pixel format is available
681 */
682 if (fmt->index > 0) {
683 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
684 return -EINVAL;
685 }
686 /* get the remote pad */
687 remote = media_entity_remote_pad(&video->pad);
688 if (!remote) {
689 v4l2_err(&vpfe_dev->v4l2_dev,
690 "invalid remote pad for video node\n");
691 return -EINVAL;
692 }
693 /* get the remote subdev */
694 subdev = vpfe_video_remote_subdev(video, NULL);
695 if (!subdev) {
696 v4l2_err(&vpfe_dev->v4l2_dev,
697 "invalid remote subdev for video node\n");
698 return -EINVAL;
699 }
700 sd_fmt.pad = remote->index;
701 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
702 /* get output format of remote subdev */
703 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
704 if (ret) {
705 v4l2_err(&vpfe_dev->v4l2_dev,
706 "invalid remote subdev for video node\n");
707 return ret;
708 }
709 /* convert to pix format */
710 mbus.code = sd_fmt.format.code;
711 mbus_to_pix(&mbus, &format.fmt.pix);
712 /* copy the result */
713 fmt->pixelformat = format.fmt.pix.pixelformat;
714
715 return 0;
716 }
717
718 /*
719 * vpfe_s_fmt() - set the format on video device
720 * @file: file pointer
721 * @priv: void pointer
722 * @fmt: pointer to v4l2_format structure
723 *
724 * validate and set the format on video device
725 *
726 * Return 0 on success, error code otherwise
727 */
728 static int vpfe_s_fmt(struct file *file, void *priv,
729 struct v4l2_format *fmt)
730 {
731 struct vpfe_video_device *video = video_drvdata(file);
732 struct vpfe_device *vpfe_dev = video->vpfe_dev;
733 struct v4l2_format format;
734 int ret;
735
736 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
737 /* If streaming is started, return error */
738 if (video->started) {
739 v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
740 return -EBUSY;
741 }
742 /* get adjacent subdev's output pad format */
743 ret = __vpfe_video_get_format(video, &format);
744 if (ret)
745 return ret;
746 *fmt = format;
747 video->fmt = *fmt;
748 return 0;
749 }
750
751 /*
752 * vpfe_try_fmt() - try the format on video device
753 * @file: file pointer
754 * @priv: void pointer
755 * @fmt: pointer to v4l2_format structure
756 *
757 * validate the format, update with correct format
758 * based on output format set on adjacent subdev
759 *
760 * Return 0 on success, error code otherwise
761 */
762 static int vpfe_try_fmt(struct file *file, void *priv,
763 struct v4l2_format *fmt)
764 {
765 struct vpfe_video_device *video = video_drvdata(file);
766 struct vpfe_device *vpfe_dev = video->vpfe_dev;
767 struct v4l2_format format;
768 int ret;
769
770 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
771 /* get adjacent subdev's output pad format */
772 ret = __vpfe_video_get_format(video, &format);
773 if (ret)
774 return ret;
775
776 *fmt = format;
777 return 0;
778 }
779
780 /*
781 * vpfe_enum_input() - enum inputs supported on media chain
782 * @file: file pointer
783 * @priv: void pointer
784 * @inp: pointer to v4l2_input structure
785 *
786 * fills the v4l2_input structure with an input available on the media chain,
787 * only inputs of the linked subdev are enumerated since the chain is already set up
788 *
789 * Return 0 if successful, -EINVAL if the media chain is invalid
790 */
791 static int vpfe_enum_input(struct file *file, void *priv,
792 struct v4l2_input *inp)
793 {
794 struct vpfe_video_device *video = video_drvdata(file);
795 struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
796 struct vpfe_device *vpfe_dev = video->vpfe_dev;
797
798 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
799 /* enumerate from the subdev user has chosen through mc */
800 if (inp->index < sdinfo->num_inputs) {
801 memcpy(inp, &sdinfo->inputs[inp->index],
802 sizeof(struct v4l2_input));
803 return 0;
804 }
805 return -EINVAL;
806 }
807
808 /*
809 * vpfe_g_input() - get index of the input which is active
810 * @file: file pointer
811 * @priv: void pointer
812 * @index: pointer to unsigned int
813 *
814 * sets index to the index of the currently active input
815 */
816 static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
817 {
818 struct vpfe_video_device *video = video_drvdata(file);
819 struct vpfe_device *vpfe_dev = video->vpfe_dev;
820
821 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
822
823 *index = video->current_input;
824 return 0;
825 }
826
827 /*
828 * vpfe_s_input() - set the input selected by the input index
829 * @file: file pointer
830 * @priv: void pointer
831 * @index: input index to select
832 *
833 * set input on external subdev
834 *
835 * Return 0 on success, error code otherwise
836 */
837 static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
838 {
839 struct vpfe_video_device *video = video_drvdata(file);
840 struct vpfe_device *vpfe_dev = video->vpfe_dev;
841 struct vpfe_ext_subdev_info *sdinfo;
842 struct vpfe_route *route;
843 struct v4l2_input *inps;
844 u32 output;
845 u32 input;
846 int ret;
847 int i;
848
849 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
850
851 ret = mutex_lock_interruptible(&video->lock);
852 if (ret)
853 return ret;
854 /*
855 * If streaming is started return device busy
856 * error
857 */
858 if (video->started) {
859 v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
860 ret = -EBUSY;
861 goto unlock_out;
862 }
863
864 sdinfo = video->current_ext_subdev;
865 if (!sdinfo->registered) {
866 ret = -EINVAL;
867 goto unlock_out;
868 }
869 if (vpfe_dev->cfg->setup_input &&
870 vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
871 ret = -EFAULT;
872 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
873 "couldn't setup input for %s\n",
874 sdinfo->module_name);
875 goto unlock_out;
876 }
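/* apply the subdev routing for the selected input when the subdev supports routing */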
877 route = &sdinfo->routes[index];
878 if (route && sdinfo->can_route) {
879 input = route->input;
880 output = route->output;
881 ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
882 sdinfo->grp_id, video,
883 s_routing, input, output, 0);
884 if (ret) {
885 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
886 "s_input:error in setting input in decoder\n");
887 ret = -EINVAL;
888 goto unlock_out;
889 }
890 }
891 /* set standards set by subdev in video device */
892 for (i = 0; i < sdinfo->num_inputs; i++) {
893 inps = &sdinfo->inputs[i];
894 video->video_dev.tvnorms |= inps->std;
895 }
896 video->current_input = index;
897 unlock_out:
898 mutex_unlock(&video->lock);
899 return ret;
900 }
901
902 /*
903 * vpfe_querystd() - query std which is being input on external subdev
904 * @file: file pointer
905 * @priv: void pointer
906 * @std_id: pointer to v4l2_std_id structure
907 *
908 * call external subdev through v4l2_device_call_until_err to
909 * get the std that is currently detected.
910 *
911 * Return 0 on success, error code otherwise
912 */
913 static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
914 {
915 struct vpfe_video_device *video = video_drvdata(file);
916 struct vpfe_device *vpfe_dev = video->vpfe_dev;
917 struct vpfe_ext_subdev_info *sdinfo;
918 int ret;
919
920 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
921
922 ret = mutex_lock_interruptible(&video->lock);
923 sdinfo = video->current_ext_subdev;
924 if (ret)
925 return ret;
926 /* Call querystd function of decoder device */
927 ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
928 video, querystd, std_id);
929 mutex_unlock(&video->lock);
930 return ret;
931 }
932
933 /*
934 * vpfe_s_std() - set std on external subdev
935 * @file: file pointer
936 * @priv: void pointer
937 * @std_id: pointer to v4l2_std_id structure
938 *
939 * set std pointed by std_id on external subdev by calling it using
940 * v4l2_device_call_until_err
941 *
942 * Return 0 on success, error code otherwise
943 */
944 static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
945 {
946 struct vpfe_video_device *video = video_drvdata(file);
947 struct vpfe_device *vpfe_dev = video->vpfe_dev;
948 struct vpfe_ext_subdev_info *sdinfo;
949 int ret;
950
951 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
952
953 /* Call decoder driver function to set the standard */
954 ret = mutex_lock_interruptible(&video->lock);
955 if (ret)
956 return ret;
957 sdinfo = video->current_ext_subdev;
958 /* If streaming is started, return device busy error */
959 if (video->started) {
960 v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
961 ret = -EBUSY;
962 goto unlock_out;
963 }
964 ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
965 video, s_std, std_id);
966 if (ret < 0) {
967 v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
968 video->stdid = V4L2_STD_UNKNOWN;
969 goto unlock_out;
970 }
971 video->stdid = std_id;
972 unlock_out:
973 mutex_unlock(&video->lock);
974 return ret;
975 }
976
977 static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
978 {
979 struct vpfe_video_device *video = video_drvdata(file);
980 struct vpfe_device *vpfe_dev = video->vpfe_dev;
981
982 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
983 *tvnorm = video->stdid;
984 return 0;
985 }
986
987 /*
988 * vpfe_enum_dv_timings() - enumerate dv_timings which are supported
989 * by the external subdev
990 * @file: file pointer
991 * @priv: void pointer
992 * @timings: pointer to v4l2_enum_dv_timings structure
993 *
994 * enum dv_timings's which are supported by external subdev through
995 * v4l2_subdev_call
996 *
997 * Return 0 on success, error code otherwise
998 */
999 static int
1000 vpfe_enum_dv_timings(struct file *file, void *fh,
1001 struct v4l2_enum_dv_timings *timings)
1002 {
1003 struct vpfe_video_device *video = video_drvdata(file);
1004 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1005 struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
1006
1007 timings->pad = 0;
1008
1009 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
1010 return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
1011 }
1012
1013 /*
1014 * vpfe_query_dv_timings() - query the dv_timings which is being input
1015 * to external subdev
1016 * @file: file pointer
1017 * @priv: void pointer
1018 * @timings: pointer to v4l2_dv_timings structure
1019 *
1020 * get dv_timings which is being input on external subdev through
1021 * v4l2_subdev_call
1022 *
1023 * Return 0 on success, error code otherwise
1024 */
1025 static int
1026 vpfe_query_dv_timings(struct file *file, void *fh,
1027 struct v4l2_dv_timings *timings)
1028 {
1029 struct vpfe_video_device *video = video_drvdata(file);
1030 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1031 struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
1032
1033 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
1034 return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
1035 }
1036
1037 /*
1038 * vpfe_s_dv_timings() - set dv_timings on external subdev
1039 * @file: file pointer
1040 * @priv: void pointer
1041 * @timings: pointer to v4l2_dv_timings structure
1042 *
1043 * set dv_timings pointed by timings on external subdev through
1044 * v4l2_device_call_until_err, this configures amplifier also
1045 *
1046 * Return 0 on success, error code otherwise
1047 */
1048 static int
1049 vpfe_s_dv_timings(struct file *file, void *fh,
1050 struct v4l2_dv_timings *timings)
1051 {
1052 struct vpfe_video_device *video = video_drvdata(file);
1053 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1054
1055 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");
1056
1057 video->stdid = V4L2_STD_UNKNOWN;
1058 return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
1059 video->current_ext_subdev->grp_id,
1060 video, s_dv_timings, timings);
1061 }
1062
1063 /*
1064 * vpfe_g_dv_timings() - get dv_timings which is set on external subdev
1065 * @file: file pointer
1066 * @priv: void pointer
1067 * @timings: pointer to v4l2_dv_timings structure
1068 *
1069 * get dv_timings which is set on external subdev through
1070 * v4l2_subdev_call
1071 *
1072 * Return 0 on success, error code otherwise
1073 */
1074 static int
1075 vpfe_g_dv_timings(struct file *file, void *fh,
1076 struct v4l2_dv_timings *timings)
1077 {
1078 struct vpfe_video_device *video = video_drvdata(file);
1079 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1080 struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
1081
1082 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
1083 return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
1084 }
1085
1086 /*
1087 * Videobuf operations
1088 */
1089 /*
1090 * vpfe_buffer_queue_setup : Callback function for buffer setup.
1091 * @vq: vb2_queue ptr
1092 * @nbuffers: ptr to number of buffers requested by application
1093 * @nplanes: contains number of distinct video planes needed to hold a frame
1095 * @sizes[]: contains the size (in bytes) of each plane.
1096 * @alloc_devs: ptr to allocation context
1097 *
1098 * This callback function is called when REQBUFS is called to adjust
1099 * the number of buffers and the buffer size
1100 */
1101 static int
1102 vpfe_buffer_queue_setup(struct vb2_queue *vq,
1103 unsigned int *nbuffers, unsigned int *nplanes,
1104 unsigned int sizes[], struct device *alloc_devs[])
1105 {
1106 struct vpfe_fh *fh = vb2_get_drv_priv(vq);
1107 struct vpfe_video_device *video = fh->video;
1108 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1109 unsigned long size;
1110
1111 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n");
1112 size = video->fmt.fmt.pix.sizeimage;
1113
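/* make sure at least three buffers are available in total */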
1114 if (vq->num_buffers + *nbuffers < 3)
1115 *nbuffers = 3 - vq->num_buffers;
1116
1117 *nplanes = 1;
1118 sizes[0] = size;
1119 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1120 "nbuffers=%d, size=%lu\n", *nbuffers, size);
1121 return 0;
1122 }
1123
1124 /*
1125 * vpfe_buffer_prepare : callback function for buffer prepare
1126 * @vb: ptr to vb2_buffer
1127 *
1128 * This is the callback function for buffer prepare when vb2_qbuf()
1129 * function is called. The buffer payload is set up and the DMA address
1130 * of the plane is checked for proper alignment
1131 */
1132 static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1133 {
1134 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
1135 struct vpfe_video_device *video = fh->video;
1136 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1137 unsigned long addr;
1138
1139 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
1140
1141 if (vb->state != VB2_BUF_STATE_ACTIVE &&
1142 vb->state != VB2_BUF_STATE_PREPARED)
1143 return 0;
1144
1145 /* Initialize buffer */
1146 vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
1147 if (vb2_plane_vaddr(vb, 0) &&
1148 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
1149 return -EINVAL;
1150
1151 addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1152 /* Make sure the DMA address is aligned to 32 bytes */
1153 if (!IS_ALIGNED(addr, 32))
1154 return -EINVAL;
1155
1156 return 0;
1157 }
1158
1159 static void vpfe_buffer_queue(struct vb2_buffer *vb)
1160 {
1161 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1162 /* Get the file handle object and device object */
1163 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
1164 struct vpfe_video_device *video = fh->video;
1165 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1166 struct vpfe_pipeline *pipe = &video->pipe;
1167 struct vpfe_cap_buffer *buf = container_of(vbuf,
1168 struct vpfe_cap_buffer, vb);
1169 unsigned long flags;
1170 unsigned long empty;
1171 unsigned long addr;
1172
1173 spin_lock_irqsave(&video->dma_queue_lock, flags);
1174 empty = list_empty(&video->dma_queue);
1175 /* add the buffer to the DMA queue */
1176 list_add_tail(&buf->list, &video->dma_queue);
1177 spin_unlock_irqrestore(&video->dma_queue_lock, flags);
1178 /* this case happens in case of single shot */
1179 if (empty && video->started && pipe->state ==
1180 VPFE_PIPELINE_STREAM_SINGLESHOT &&
1181 video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
1182 spin_lock(&video->dma_queue_lock);
1183 addr = vpfe_video_get_next_buffer(video);
1184 video->ops->queue(vpfe_dev, addr);
1185
1186 video->state = VPFE_VIDEO_BUFFER_QUEUED;
1187 spin_unlock(&video->dma_queue_lock);
1188
1189 /* enable h/w each time in single shot */
1190 if (vpfe_video_is_pipe_ready(pipe))
1191 vpfe_pipeline_set_stream(pipe,
1192 VPFE_PIPELINE_STREAM_SINGLESHOT);
1193 }
1194 }
1195
1196 /* vpfe_start_capture() - start streaming on all the subdevs */
1197 static int vpfe_start_capture(struct vpfe_video_device *video)
1198 {
1199 struct vpfe_pipeline *pipe = &video->pipe;
1200 int ret = 0;
1201
1202 video->started = 1;
1203 if (vpfe_video_is_pipe_ready(pipe))
1204 ret = vpfe_pipeline_set_stream(pipe, pipe->state);
1205
1206 return ret;
1207 }
1208
1209 static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1210 {
1211 struct vpfe_fh *fh = vb2_get_drv_priv(vq);
1212 struct vpfe_video_device *video = fh->video;
1213 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1214 unsigned long addr;
1215 int ret;
1216
1217 ret = mutex_lock_interruptible(&video->lock);
1218 if (ret)
1219 goto streamoff;
1220
1221 /* Get the next frame from the buffer queue */
1222 video->cur_frm = video->next_frm =
1223 list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
1224 /* Remove buffer from the buffer queue */
1225 list_del(&video->cur_frm->list);
1226 /* Mark state of the current frame to active */
1227 video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
1228 /* Initialize field_id and started member */
1229 video->field_id = 0;
1230 addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
1231 video->ops->queue(vpfe_dev, addr);
1232 video->state = VPFE_VIDEO_BUFFER_QUEUED;
1233
1234 ret = vpfe_start_capture(video);
1235 if (ret) {
1236 struct vpfe_cap_buffer *buf, *tmp;
1237
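/* hand the current frame and all queued buffers back to vb2 in the QUEUED state */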
1238 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1239 VB2_BUF_STATE_QUEUED);
1240 list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
1241 list_del(&buf->list);
1242 vb2_buffer_done(&buf->vb.vb2_buf,
1243 VB2_BUF_STATE_QUEUED);
1244 }
1245 goto unlock_out;
1246 }
1247
1248 mutex_unlock(&video->lock);
1249
1250 return ret;
1251 unlock_out:
1252 mutex_unlock(&video->lock);
1253 streamoff:
1254 ret = vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
1255 return 0;
1256 }
1257
1258 static int vpfe_buffer_init(struct vb2_buffer *vb)
1259 {
1260 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1261 struct vpfe_cap_buffer *buf = container_of(vbuf,
1262 struct vpfe_cap_buffer, vb);
1263
1264 INIT_LIST_HEAD(&buf->list);
1265 return 0;
1266 }
1267
1268 /* abort streaming and wait for last buffer */
1269 static void vpfe_stop_streaming(struct vb2_queue *vq)
1270 {
1271 struct vpfe_fh *fh = vb2_get_drv_priv(vq);
1272 struct vpfe_video_device *video = fh->video;
1273
1274 /* release all active buffers */
1275 if (video->cur_frm == video->next_frm) {
1276 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1277 VB2_BUF_STATE_ERROR);
1278 } else {
1279 if (video->cur_frm != NULL)
1280 vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
1281 VB2_BUF_STATE_ERROR);
1282 if (video->next_frm != NULL)
1283 vb2_buffer_done(&video->next_frm->vb.vb2_buf,
1284 VB2_BUF_STATE_ERROR);
1285 }
1286
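/* give back any buffers still waiting on the DMA queue with an error status */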
1287 while (!list_empty(&video->dma_queue)) {
1288 video->next_frm = list_entry(video->dma_queue.next,
1289 struct vpfe_cap_buffer, list);
1290 list_del(&video->next_frm->list);
1291 vb2_buffer_done(&video->next_frm->vb.vb2_buf,
1292 VB2_BUF_STATE_ERROR);
1293 }
1294 }
1295
1296 static void vpfe_buf_cleanup(struct vb2_buffer *vb)
1297 {
1298 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1299 struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
1300 struct vpfe_video_device *video = fh->video;
1301 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1302 struct vpfe_cap_buffer *buf = container_of(vbuf,
1303 struct vpfe_cap_buffer, vb);
1304
1305 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
1306 if (vb->state == VB2_BUF_STATE_ACTIVE)
1307 list_del_init(&buf->list);
1308 }
1309
1310 static const struct vb2_ops video_qops = {
1311 .queue_setup = vpfe_buffer_queue_setup,
1312 .buf_init = vpfe_buffer_init,
1313 .buf_prepare = vpfe_buffer_prepare,
1314 .start_streaming = vpfe_start_streaming,
1315 .stop_streaming = vpfe_stop_streaming,
1316 .buf_cleanup = vpfe_buf_cleanup,
1317 .buf_queue = vpfe_buffer_queue,
1318 .wait_prepare = vb2_ops_wait_prepare,
1319 .wait_finish = vb2_ops_wait_finish,
1320 };
1321
1322 /*
1323 * vpfe_reqbufs() - REQBUFS is supported only once after opening
1324 * the device.
1325 */
1326 static int vpfe_reqbufs(struct file *file, void *priv,
1327 struct v4l2_requestbuffers *req_buf)
1328 {
1329 struct vpfe_video_device *video = video_drvdata(file);
1330 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1331 struct vpfe_fh *fh = file->private_data;
1332 struct vb2_queue *q;
1333 int ret;
1334
1335 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
1336
1337 if (req_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1338 req_buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT){
1339 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
1340 return -EINVAL;
1341 }
1342
1343 ret = mutex_lock_interruptible(&video->lock);
1344 if (ret)
1345 return ret;
1346
1347 if (video->io_usrs != 0) {
1348 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
1349 ret = -EBUSY;
1350 goto unlock_out;
1351 }
1352 video->memory = req_buf->memory;
1353
1354 /* Initialize videobuf2 queue as per the buffer type */
1355 q = &video->buffer_queue;
1356 q->type = req_buf->type;
1357 q->io_modes = VB2_MMAP | VB2_USERPTR;
1358 q->drv_priv = fh;
1359 q->min_buffers_needed = 1;
1360 q->ops = &video_qops;
1361 q->mem_ops = &vb2_dma_contig_memops;
1362 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
1363 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1364 q->dev = vpfe_dev->pdev;
1365 q->lock = &video->lock;
1366
1367 ret = vb2_queue_init(q);
1368 if (ret) {
1369 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1370 goto unlock_out;
1371 }
1372
1373 fh->io_allowed = 1;
1374 video->io_usrs = 1;
1375 INIT_LIST_HEAD(&video->dma_queue);
1376 ret = vb2_reqbufs(&video->buffer_queue, req_buf);
1377
1378 unlock_out:
1379 mutex_unlock(&video->lock);
1380 return ret;
1381 }
1382
1383 /*
1384 * vpfe_querybuf() - query buffers for exchange
1385 */
1386 static int vpfe_querybuf(struct file *file, void *priv,
1387 struct v4l2_buffer *buf)
1388 {
1389 struct vpfe_video_device *video = video_drvdata(file);
1390 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1391
1392 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
1393
1394 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1395 buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1396 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
1397 return -EINVAL;
1398 }
1399
1400 if (video->memory != V4L2_MEMORY_MMAP) {
1401 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
1402 return -EINVAL;
1403 }
1404
1405 /* Call vb2_querybuf to get information */
1406 return vb2_querybuf(&video->buffer_queue, buf);
1407 }
1408
1409 /*
1410 * vpfe_qbuf() - queue buffers for capture or processing
1411 */
1412 static int vpfe_qbuf(struct file *file, void *priv,
1413 struct v4l2_buffer *p)
1414 {
1415 struct vpfe_video_device *video = video_drvdata(file);
1416 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1417 struct vpfe_fh *fh = file->private_data;
1418
1419 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
1420
1421 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1422 p->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1423 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
1424 return -EINVAL;
1425 }
1426 /*
1427 * If this file handle is not allowed to do IO,
1428 * return error
1429 */
1430 if (!fh->io_allowed) {
1431 v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
1432 return -EACCES;
1433 }
1434
1435 return vb2_qbuf(&video->buffer_queue, p);
1436 }
1437
1438 /*
1439 * vpfe_dqbuf() - dequeue a buffer which is done with processing
1440 */
1441 static int vpfe_dqbuf(struct file *file, void *priv,
1442 struct v4l2_buffer *buf)
1443 {
1444 struct vpfe_video_device *video = video_drvdata(file);
1445 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1446
1447 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
1448
1449 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1450 buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1451 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
1452 return -EINVAL;
1453 }
1454
1455 return vb2_dqbuf(&video->buffer_queue,
1456 buf, (file->f_flags & O_NONBLOCK));
1457 }
1458
1459 /*
1460 * vpfe_streamon() - start streaming
1461 * @file: file pointer
1462 * @priv: void pointer
1463 * @buf_type: enum v4l2_buf_type
1464 *
1465 * queue buffer onto hardware for capture/processing and
1466 * start all the subdevs which are in media chain
1467 *
1468 * Return 0 on success, error code otherwise
1469 */
1470 static int vpfe_streamon(struct file *file, void *priv,
1471 enum v4l2_buf_type buf_type)
1472 {
1473 struct vpfe_video_device *video = video_drvdata(file);
1474 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1475 struct vpfe_pipeline *pipe = &video->pipe;
1476 struct vpfe_fh *fh = file->private_data;
1477 int ret = -EINVAL;
1478
1479 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
1480
1481 if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1482 buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1483 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
1484 return ret;
1485 }
1486 /* If file handle is not allowed IO, return error */
1487 if (!fh->io_allowed) {
1488 v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
1489 return -EACCES;
1490 }
1491 /* If buffer queue is empty, return error */
1492 if (list_empty(&video->buffer_queue.queued_list)) {
1493 v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
1494 return -EIO;
1495 }
1496 /* Validate the pipeline */
1497 if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1498 ret = vpfe_video_validate_pipeline(pipe);
1499 if (ret < 0)
1500 return ret;
1501 }
1502 /* Call vb2_streamon to start streaming */
1503 return vb2_streamon(&video->buffer_queue, buf_type);
1504 }
1505
1506 /*
1507 * vpfe_streamoff() - stop streaming
1508 * @file: file pointer
1509 * @priv: void pointer
1510 * @buf_type: enum v4l2_buf_type
1511 *
1512 * stop all the subdevs which are in media chain
1513 *
1514 * Return 0 on success, error code otherwise
1515 */
1516 static int vpfe_streamoff(struct file *file, void *priv,
1517 enum v4l2_buf_type buf_type)
1518 {
1519 struct vpfe_video_device *video = video_drvdata(file);
1520 struct vpfe_device *vpfe_dev = video->vpfe_dev;
1521 struct vpfe_fh *fh = file->private_data;
1522 int ret = 0;
1523
1524 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
1525
1526 if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1527 buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1528 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
1529 return -EINVAL;
1530 }
1531
1532 /* If IO is not allowed for this file handle, return error */
1533 if (!fh->io_allowed) {
1534 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
1535 return -EACCES;
1536 }
1537
1538 /* If streaming is not started, return error */
1539 if (!video->started) {
1540 v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
1541 return -EINVAL;
1542 }
1543
1544 ret = mutex_lock_interruptible(&video->lock);
1545 if (ret)
1546 return ret;
1547
1548 vpfe_stop_capture(video);
1549 ret = vb2_streamoff(&video->buffer_queue, buf_type);
1550 mutex_unlock(&video->lock);
1551
1552 return ret;
1553 }
1554
1555 /* vpfe capture ioctl operations */
1556 static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
1557 .vidioc_querycap = vpfe_querycap,
1558 .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
1559 .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
1560 .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
1561 .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
1562 .vidioc_g_fmt_vid_out = vpfe_g_fmt,
1563 .vidioc_s_fmt_vid_out = vpfe_s_fmt,
1564 .vidioc_try_fmt_vid_out = vpfe_try_fmt,
1565 .vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
1566 .vidioc_enum_input = vpfe_enum_input,
1567 .vidioc_g_input = vpfe_g_input,
1568 .vidioc_s_input = vpfe_s_input,
1569 .vidioc_querystd = vpfe_querystd,
1570 .vidioc_s_std = vpfe_s_std,
1571 .vidioc_g_std = vpfe_g_std,
1572 .vidioc_enum_dv_timings = vpfe_enum_dv_timings,
1573 .vidioc_query_dv_timings = vpfe_query_dv_timings,
1574 .vidioc_s_dv_timings = vpfe_s_dv_timings,
1575 .vidioc_g_dv_timings = vpfe_g_dv_timings,
1576 .vidioc_reqbufs = vpfe_reqbufs,
1577 .vidioc_querybuf = vpfe_querybuf,
1578 .vidioc_qbuf = vpfe_qbuf,
1579 .vidioc_dqbuf = vpfe_dqbuf,
1580 .vidioc_streamon = vpfe_streamon,
1581 .vidioc_streamoff = vpfe_streamoff,
1582 };
1583
1584 /* VPFE video init function */
1585 int vpfe_video_init(struct vpfe_video_device *video, const char *name)
1586 {
1587 const char *direction;
1588 int ret;
1589
1590 switch (video->type) {
1591 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1592 direction = "output";
1593 video->pad.flags = MEDIA_PAD_FL_SINK;
1594 video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1595 break;
1596
1597 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1598 direction = "input";
1599 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1600 video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
1601 break;
1602
1603 default:
1604 return -EINVAL;
1605 }
1606 /* Initialize field of video device */
1607 mutex_init(&video->lock);
1608 video->video_dev.release = video_device_release;
1609 video->video_dev.fops = &vpfe_fops;
1610 video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
1611 video->video_dev.minor = -1;
1612 video->video_dev.tvnorms = 0;
1613 video->video_dev.lock = &video->lock;
1614 snprintf(video->video_dev.name, sizeof(video->video_dev.name),
1615 "DAVINCI VIDEO %s %s", name, direction);
1616
1617 spin_lock_init(&video->irqlock);
1618 spin_lock_init(&video->dma_queue_lock);
1619 ret = media_entity_pads_init(&video->video_dev.entity,
1620 1, &video->pad);
1621 if (ret < 0)
1622 return ret;
1623
1624 video_set_drvdata(&video->video_dev, video);
1625
1626 return 0;
1627 }
1628
1629 /* vpfe video device register function */
1630 int vpfe_video_register(struct vpfe_video_device *video,
1631 struct v4l2_device *vdev)
1632 {
1633 int ret;
1634
1635 video->video_dev.v4l2_dev = vdev;
1636
1637 ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
1638 if (ret < 0)
1639 pr_err("%s: could not register video device (%d)\n",
1640 __func__, ret);
1641 return ret;
1642 }
1643
1644 /* vpfe video device unregister function */
1645 void vpfe_video_unregister(struct vpfe_video_device *video)
1646 {
1647 if (video_is_registered(&video->video_dev)) {
1648 video_unregister_device(&video->video_dev);
1649 media_entity_cleanup(&video->video_dev.entity);
1650 }
1651 }
1652