/*
 * V4L2 context helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_buffers.h"
#include "v4l2_fmt.h"
#include "v4l2_m2m.h"

struct v4l2_format_update {
    uint32_t v4l2_fmt;
    int update_v4l2;

    enum AVPixelFormat av_fmt;
    int update_avfmt;
};

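/* A V4L2Context is always embedded in a V4L2m2mContext, either as its output
 * queue (fed with coded data or raw frames) or as its capture queue;
 * container_of() recovers the owning V4L2m2mContext from either one. */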
static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
{
    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
        container_of(ctx, V4L2m2mContext, output) :
        container_of(ctx, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Context *ctx)
{
    return ctx_to_m2mctx(ctx)->avctx;
}

static inline unsigned int v4l2_get_width(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
}

static inline unsigned int v4l2_get_height(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
}

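/* Query the driver's pixel aspect ratio via VIDIOC_CROPCAP; if the ioctl
 * fails or is unsupported, return 0/1 so the caller treats the SAR as
 * unknown. */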
static AVRational v4l2_get_sar(V4L2Context *ctx)
{
    struct AVRational sar = { 0, 1 };
    struct v4l2_cropcap cropcap;
    int ret;

    memset(&cropcap, 0, sizeof(cropcap));
    cropcap.type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_CROPCAP, &cropcap);
    if (ret)
        return sar;

    sar.num = cropcap.pixelaspect.numerator;
    sar.den = cropcap.pixelaspect.denominator;
    return sar;
}

static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
{
    struct v4l2_format *fmt1 = &ctx->format;
    int ret = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
        fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
        fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
        :
        fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
        fmt1->fmt.pix.height != fmt2->fmt.pix.height;

    if (ret)
        av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
            ctx->name,
            v4l2_get_width(fmt1), v4l2_get_height(fmt1),
            v4l2_get_width(fmt2), v4l2_get_height(fmt2));

    return ret;
}

static inline int v4l2_type_supported(V4L2Context *ctx)
{
    return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
}

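/* Heuristic sizeimage for compressed data: roughly half of a raw YUV 4:2:0
 * frame. The decoder path adds a small fixed headroom; the encoder path
 * aligns the dimensions to 32 and rounds the result up to a 4 KiB boundary. */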
static inline int v4l2_get_framesize_compressed(V4L2Context* ctx, int width, int height)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    const int SZ_4K = 0x1000;
    int size;

    if (s->avctx && av_codec_is_decoder(s->avctx->codec))
        return ((width * height * 3 / 2) / 2) + 128;

    /* encoder */
    size = FFALIGN(height, 32) * FFALIGN(width, 32) * 3 / 2 / 2;
    return FFALIGN(size, SZ_4K);
}

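/* Copy the negotiated dimensions (and, when requested, the pixel formats)
 * from the context into its struct v4l2_format, so that a subsequent
 * TRY_FMT/S_FMT sees them. */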
static inline void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_update *fmt)
{
    ctx->format.type = ctx->type;

    if (fmt->update_avfmt)
        ctx->av_pix_fmt = fmt->av_fmt;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        /* update the sizes to handle the reconfiguration of the capture stream at runtime */
        ctx->format.fmt.pix_mp.height = ctx->height;
        ctx->format.fmt.pix_mp.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix_mp.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    } else {
        ctx->format.fmt.pix.height = ctx->height;
        ctx->format.fmt.pix.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix.sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    }
}

/**
 * Handle a resolution change event or an end of stream event.
 * Returns 1 if reinit was successful, negative if it failed,
 * and 0 if reinit was not executed.
 */
static int v4l2_handle_event(V4L2Context *ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_format cap_fmt = s->capture.format;
    struct v4l2_format out_fmt = s->output.format;
    struct v4l2_event evt = { 0 };
    int full_reinit, reinit, ret;

    ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
        return 0;
    }

    if (evt.type == V4L2_EVENT_EOS) {
        ctx->done = 1;
        return 0;
    }

    if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
        return 0;

    ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
        return 0;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
        return 0;
    }

    full_reinit = v4l2_resolution_changed(&s->output, &out_fmt);
    if (full_reinit) {
        s->output.height = v4l2_get_height(&out_fmt);
        s->output.width = v4l2_get_width(&out_fmt);
        s->output.sample_aspect_ratio = v4l2_get_sar(&s->output);
    }

    reinit = v4l2_resolution_changed(&s->capture, &cap_fmt);
    if (reinit) {
        s->capture.height = v4l2_get_height(&cap_fmt);
        s->capture.width = v4l2_get_width(&cap_fmt);
        s->capture.sample_aspect_ratio = v4l2_get_sar(&s->capture);
    }

    if (full_reinit || reinit)
        s->reinit = 1;

    if (full_reinit) {
        ret = ff_v4l2_m2m_codec_full_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n");
            return AVERROR(EINVAL);
        }
        goto reinit_run;
    }

    if (reinit) {
        if (s->avctx)
            ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
        if (ret < 0)
            av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n");

        ret = ff_v4l2_m2m_codec_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n");
            return AVERROR(EINVAL);
        }
        goto reinit_run;
    }

    /* dummy event received */
    return 0;

    /* reinit executed */
reinit_run:
    return 1;
}

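/* Ask the decoder to drain with V4L2_DEC_CMD_STOP; drivers that do not
 * implement decoder commands (ENOTTY) fall back to stopping the stream
 * altogether. The encoder variant below mirrors this with ENC_CMD_STOP. */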
static int v4l2_stop_decode(V4L2Context *ctx)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd = V4L2_DEC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
    if (ret) {
        /* DECODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

static int v4l2_stop_encode(V4L2Context *ctx)
{
    struct v4l2_encoder_cmd cmd = {
        .cmd = V4L2_ENC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENCODER_CMD, &cmd);
    if (ret) {
        /* ENCODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

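/* Poll the m2m device and dequeue one buffer from this context's queue.
 * A negative timeout blocks; a timeout of 0 only reclaims buffers the driver
 * has already finished with. Returns NULL when nothing could be dequeued,
 * when a reinit consumed the event, or when the context is done (EOS/drain). */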
static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer *avbuf;
    struct pollfd pfd = {
        .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx->buffers) {
        for (i = 0; i < ctx->num_buffers; i++) {
            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                break;
        }
        if (i == ctx->num_buffers)
            av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers returned to "
                                                "userspace. Increase num_capture_buffers "
                                                "to prevent device deadlock or dropped "
                                                "packets/frames.\n");
    }

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            /* capture buffer initialization happens during decode hence
             * detection happens at runtime
             */
            if (!ctx->buffers)
                break;

            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        ctx->done = 1;
        return NULL;
    }

start:
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events = POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events = POLLIN | POLLRDNORM | POLLPRI;
    }

    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet
           no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {

        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                        ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        if (ctx_to_m2mctx(ctx)->draining && !V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
                            buf.m.planes[0].bytesused : buf.bytesused;
            if (bytesused == 0) {
                ctx->done = 1;
                return NULL;
            }
#ifdef V4L2_BUF_FLAG_LAST
            if (buf.flags & V4L2_BUF_FLAG_LAST)
                ctx->done = 1;
#endif
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }
        return avbuf;
    }

    return NULL;
}

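/* Find a buffer that userspace owns and can refill. On the output queue,
 * first reclaim (with a zero timeout) any buffers the driver has released. */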
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
{
    int timeout = 0; /* return when no more buffers to dequeue */
    int i;

    /* get back as many output buffers as possible */
    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
        do {
        } while (v4l2_dequeue_v4l2buf(ctx, timeout));
    }

    for (i = 0; i < ctx->num_buffers; i++) {
        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
            return &ctx->buffers[i];
    }

    return NULL;
}

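/* Unmap every plane that was mmap'ed and ask the driver to free its buffers
 * by issuing VIDIOC_REQBUFS with a count of zero. */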
static int v4l2_release_buffers(V4L2Context* ctx)
{
    struct v4l2_requestbuffers req = {
        .memory = V4L2_MEMORY_MMAP,
        .type = ctx->type,
        .count = 0, /* 0 -> unmaps buffers from the driver */
    };
    int i, j;

    for (i = 0; i < ctx->num_buffers; i++) {
        V4L2Buffer *buffer = &ctx->buffers[i];

        for (j = 0; j < buffer->num_planes; j++) {
            struct V4L2Plane_info *p = &buffer->plane_info[j];
            if (p->mm_addr && p->length)
                if (munmap(p->mm_addr, p->length) < 0)
                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s)\n", ctx->name, av_err2str(AVERROR(errno)));
        }
    }

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
}

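/* Check with VIDIOC_TRY_FMT whether the driver would accept the V4L2
 * equivalent of the given AVPixelFormat on this queue, without applying it. */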
static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
{
    struct v4l2_format *fmt = &ctx->format;
    uint32_t v4l2_fmt;
    int ret;

    v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
        fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
    else
        fmt->fmt.pix.pixelformat = v4l2_fmt;

    fmt->type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, fmt);
    if (ret)
        return AVERROR(EINVAL);

    return 0;
}

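/* Pick a raw pixel format for this queue: prefer the one already requested in
 * the context, otherwise walk the driver's VIDIOC_ENUM_FMT list until a
 * format supported by both the driver and FFmpeg is found. */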
static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p)
{
    enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
    struct v4l2_fmtdesc fdesc;
    int ret;

    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    if (pixfmt != AV_PIX_FMT_NONE) {
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (!ret)
            return 0;
    }

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (ret) {
            fdesc.index++;
            continue;
        }

        *p = pixfmt;

        return 0;
    }

    return AVERROR(EINVAL);
}

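/* Map the context's codec id to a V4L2 compressed pixel format and verify,
 * via VIDIOC_ENUM_FMT, that the driver actually supports it. */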
static int v4l2_get_coded_format(V4L2Context* ctx, uint32_t *p)
{
    struct v4l2_fmtdesc fdesc;
    uint32_t v4l2_fmt;
    int ret;

    /* translate to a valid v4l2 format */
    v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    /* check if the driver supports this format */
    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        if (fdesc.pixelformat == v4l2_fmt)
            break;

        fdesc.index++;
    }

    *p = v4l2_fmt;

    return 0;
}

/*****************************************************************************
 *
 *             V4L2 Context Interface
 *
 *****************************************************************************/

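/* Start or stop streaming on this queue (cmd is VIDIOC_STREAMON or
 * VIDIOC_STREAMOFF) and record the resulting state in ctx->streamon. */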
int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd)
{
    int type = ctx->type;
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type);
    if (ret < 0)
        return AVERROR(errno);

    ctx->streamon = (cmd == VIDIOC_STREAMON);

    return 0;
}

int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!frame) {
        ret = v4l2_stop_encode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!pkt->size) {
        ret = v4l2_stop_decode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout)
{
    V4L2Buffer *avbuf;

    /*
     * timeout=-1 blocks until:
     *    1. decoded frame available
     *    2. an input buffer is ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, timeout);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
}

int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
{
    V4L2Buffer *avbuf;

    /*
     * blocks until:
     *    1. encoded packet available
     *    2. an input buffer ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
}

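/* Negotiate the context format: raw contexts try/enumerate pixel formats,
 * coded contexts look up the codec's V4L2 format. For raw video, setting
 * probe prevents the chosen AVPixelFormat from being stored in the context. */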
int ff_v4l2_context_get_format(V4L2Context* ctx, int probe)
{
    struct v4l2_format_update fmt = { 0 };
    int ret;

    if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
        ret = v4l2_get_raw_format(ctx, &fmt.av_fmt);
        if (ret)
            return ret;

        fmt.update_avfmt = !probe;
        v4l2_save_to_context(ctx, &fmt);

        /* format has been tried already */
        return ret;
    }

    ret = v4l2_get_coded_format(ctx, &fmt.v4l2_fmt);
    if (ret)
        return ret;

    fmt.update_v4l2 = 1;
    v4l2_save_to_context(ctx, &fmt);

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, &ctx->format);
}

int ff_v4l2_context_set_format(V4L2Context* ctx)
{
    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
}

void ff_v4l2_context_release(V4L2Context* ctx)
{
    int ret;

    if (!ctx->buffers)
        return;

    ret = v4l2_release_buffers(ctx);
    if (ret)
        av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);

    av_freep(&ctx->buffers);
}

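/* Request MMAP buffers for the queue and initialize each V4L2Buffer entry.
 * The driver may adjust the number of buffers; num_buffers is updated with
 * the count actually granted. */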
int ff_v4l2_context_init(V4L2Context* ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_requestbuffers req;
    int ret, i;

    if (!v4l2_type_supported(ctx)) {
        av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
        return AVERROR_PATCHWELCOME;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
    if (ret)
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);

    memset(&req, 0, sizeof(req));
    req.count = ctx->num_buffers;
    req.memory = V4L2_MEMORY_MMAP;
    req.type = ctx->type;
    ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
        return AVERROR(errno);
    }

    ctx->num_buffers = req.count;
    ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
    if (!ctx->buffers) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        ctx->buffers[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
            goto error;
        }
    }

    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
        req.count,
        v4l2_get_width(&ctx->format),
        v4l2_get_height(&ctx->format),
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);

    return 0;

error:
    v4l2_release_buffers(ctx);

    av_freep(&ctx->buffers);

    return ret;
}