1 /*
2 # (C) 2008 Hans de Goede <hdegoede@redhat.com>
3
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU Lesser General Public License as published by
6 # the Free Software Foundation; either version 2.1 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 # Lesser General Public License for more details.
13 #
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program; if not, write to the Free Software
16 # Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
17 */
18
19 /* MAKING CHANGES TO THIS FILE?? READ THIS FIRST!!!
20
21 This file implements libv4l2, which offers v4l2_ prefixed versions of
22 open/close/etc. The API is 100% the same as directly opening /dev/videoX
23 using regular open/close/etc, the big difference is that format conversion
24 is done if necessary when capturing. That is if you (try to) set a capture
25 format which is not supported by the cam, but is supported by libv4lconvert,
26 then the try_fmt / set_fmt will succeed as if the cam supports the format
27 and on dqbuf / read the data will be converted for you and returned in
28 the request format.
29
30 Important note to people making changes to this file: All functions
31 (v4l2_close, v4l2_ioctl, etc.) are designed to function as their regular
32 counterpart when they get passed a fd that is not "registered" by libv4l2,
33 there are 2 reasons for this:
34 1) This allows us to get completely out of the way when dealing with non
35 capture devices.
36 2) libv4l2 is the base of the v4l2convert.so wrapper lib, which is a .so
37 which can be LD_PRELOAD-ed and the overrules the libc's open/close/etc,
38 and when opening /dev/videoX or /dev/v4l/ calls v4l2_open. Because we
39 behave as the regular counterpart when the fd is not known (instead of say
40 throwing an error), v4l2convert.so can simply call the v4l2_ prefixed
41 function for all wrapped functions (except for v4l2_open which will fail
42 when not called on a v4l2 device). This way the wrapper does not have to
43 keep track of which fd's are being handled by libv4l2, as libv4l2 already
44 keeps track of this itself.
45
46 This also means that libv4l2 may not use any of the regular functions
47 it mimics, as for example open could be a symbol in v4l2convert.so, which
48 in turn will call v4l2_open, so therefore v4l2_open (for example) may not
49 use the regular open()!
50
51 Another important note: libv4l2 does conversion for capture usage only, if
52 any calls are made which are passed a v4l2_buffer or v4l2_format with a
53 v4l2_buf_type which is different from V4L2_BUF_TYPE_VIDEO_CAPTURE, then
54 the v4l2_ methods behave exactly the same as their regular counterparts.
55 When modifications are made, one should be careful that this behavior is
56 preserved.
57 */
58 #include <errno.h>
59 #include <stdarg.h>
60 #include <stdio.h>
61 #include <stdlib.h>
62 #include <fcntl.h>
63 #include <string.h>
64 #include <unistd.h>
65 #include <sys/types.h>
66 #include <sys/mman.h>
67 #include <sys/stat.h>
68 #include "libv4l2.h"
69 #include "libv4l2-priv.h"
70 #include "libv4l-plugin.h"
71
/* Note these flags are stored together with the flags passed to v4l2_fd_open()
   in v4l2_dev_info's flags member, so care should be taken that they do not
   use the same bits! */
#define V4L2_STREAMON 0x0100	/* VIDIOC_STREAMON has been issued */
#define V4L2_BUFFERS_REQUESTED_BY_READ 0x0200	/* buffers were REQBUFed for read() emulation */
#define V4L2_STREAM_CONTROLLED_BY_READ 0x0400	/* stream was started by read() emulation */
#define V4L2_SUPPORTS_READ 0x0800	/* device advertises V4L2_CAP_READWRITE */
#define V4L2_STREAM_TOUCHED 0x1000	/* NOTE(review): not referenced in this chunk — confirm meaning elsewhere */
#define V4L2_USE_READ_FOR_READ 0x2000	/* device lacks V4L2_CAP_STREAMING, use driver read() */
#define V4L2_SUPPORTS_TIMEPERFRAME 0x4000	/* driver reports V4L2_CAP_TIMEPERFRAME */

/* Marker OR-ed into the fake mmap offsets we hand out for conversion buffers */
#define V4L2_MMAP_OFFSET_MAGIC 0xABCDEF00u

/* Forward declarations for helpers defined later in this file */
static void v4l2_adjust_src_fmt_to_fps(int index, int fps);
static void v4l2_set_src_and_dest_format(int index,
		struct v4l2_format *src_fmt, struct v4l2_format *dest_fmt);

/* Serializes registration of new fds into the devices table */
static pthread_mutex_t v4l2_open_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Per-fd state; a slot with .fd == -1 is free (one initializer per slot) */
static struct v4l2_dev_info devices[V4L2_MAX_DEVICES] = {
	{ .fd = -1 },
	{ .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 },
	{ .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 },
	{ .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 }, { .fd = -1 }
};
/* Highest slot index ever used + 1; only grows, scanned by v4l2_get_index() */
static int devices_used;
97
v4l2_ensure_convert_mmap_buf(int index)98 static int v4l2_ensure_convert_mmap_buf(int index)
99 {
100 if (devices[index].convert_mmap_buf != MAP_FAILED) {
101 return 0;
102 }
103
104 devices[index].convert_mmap_buf_size =
105 devices[index].convert_mmap_frame_size * devices[index].no_frames;
106
107 devices[index].convert_mmap_buf = (void *)SYS_MMAP(NULL,
108 devices[index].convert_mmap_buf_size,
109 PROT_READ | PROT_WRITE,
110 MAP_ANONYMOUS | MAP_PRIVATE,
111 -1, 0);
112
113 if (devices[index].convert_mmap_buf == MAP_FAILED) {
114 devices[index].convert_mmap_buf_size = 0;
115
116 int saved_err = errno;
117 V4L2_LOG_ERR("allocating conversion buffer\n");
118 errno = saved_err;
119 return -1;
120 }
121
122 return 0;
123 }
124
v4l2_request_read_buffers(int index)125 static int v4l2_request_read_buffers(int index)
126 {
127 int result;
128 struct v4l2_requestbuffers req;
129
130 /* Note we re-request the buffers if they are already requested as the format
131 and thus the needed buffer size may have changed. */
132 req.count = (devices[index].no_frames) ? devices[index].no_frames :
133 devices[index].nreadbuffers;
134 req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
135 req.memory = V4L2_MEMORY_MMAP;
136 result = devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
137 devices[index].fd, VIDIOC_REQBUFS, &req);
138 if (result < 0) {
139 int saved_err = errno;
140
141 V4L2_LOG("warning reqbuf (%u) failed: %s\n", req.count, strerror(errno));
142 errno = saved_err;
143 return result;
144 }
145
146 if (!devices[index].no_frames && req.count)
147 devices[index].flags |= V4L2_BUFFERS_REQUESTED_BY_READ;
148
149 devices[index].no_frames = MIN(req.count, V4L2_MAX_NO_FRAMES);
150 return 0;
151 }
152
v4l2_unrequest_read_buffers(int index)153 static void v4l2_unrequest_read_buffers(int index)
154 {
155 struct v4l2_requestbuffers req;
156
157 if (!(devices[index].flags & V4L2_BUFFERS_REQUESTED_BY_READ) ||
158 devices[index].no_frames == 0)
159 return;
160
161 /* (Un)Request buffers, note not all driver support this, and those
162 who do not support it don't need it. */
163 req.count = 0;
164 req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
165 req.memory = V4L2_MEMORY_MMAP;
166 if (devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
167 devices[index].fd, VIDIOC_REQBUFS, &req) < 0)
168 return;
169
170 devices[index].no_frames = MIN(req.count, V4L2_MAX_NO_FRAMES);
171 if (devices[index].no_frames == 0)
172 devices[index].flags &= ~V4L2_BUFFERS_REQUESTED_BY_READ;
173 }
174
v4l2_map_buffers(int index)175 static int v4l2_map_buffers(int index)
176 {
177 int result = 0;
178 unsigned int i;
179 struct v4l2_buffer buf;
180
181 for (i = 0; i < devices[index].no_frames; i++) {
182 if (devices[index].frame_pointers[i] != MAP_FAILED)
183 continue;
184
185 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
186 buf.memory = V4L2_MEMORY_MMAP;
187 buf.index = i;
188 buf.reserved = buf.reserved2 = 0;
189 result = devices[index].dev_ops->ioctl(
190 devices[index].dev_ops_priv,
191 devices[index].fd, VIDIOC_QUERYBUF, &buf);
192 if (result) {
193 int saved_err = errno;
194
195 V4L2_PERROR("querying buffer %u", i);
196 errno = saved_err;
197 break;
198 }
199
200 devices[index].frame_pointers[i] = (void *)SYS_MMAP(NULL,
201 (size_t)buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, devices[index].fd,
202 buf.m.offset);
203 if (devices[index].frame_pointers[i] == MAP_FAILED) {
204 int saved_err = errno;
205
206 V4L2_PERROR("mmapping buffer %u", i);
207 errno = saved_err;
208 result = -1;
209 break;
210 }
211 V4L2_LOG("mapped buffer %u at %p\n", i,
212 devices[index].frame_pointers[i]);
213
214 devices[index].frame_sizes[i] = buf.length;
215 }
216
217 return result;
218 }
219
v4l2_unmap_buffers(int index)220 static void v4l2_unmap_buffers(int index)
221 {
222 unsigned int i;
223
224 /* unmap the buffers */
225 for (i = 0; i < devices[index].no_frames; i++) {
226 if (devices[index].frame_pointers[i] != MAP_FAILED) {
227 SYS_MUNMAP(devices[index].frame_pointers[i],
228 devices[index].frame_sizes[i]);
229 devices[index].frame_pointers[i] = MAP_FAILED;
230 V4L2_LOG("unmapped buffer %u\n", i);
231 }
232 }
233 }
234
v4l2_streamon(int index)235 static int v4l2_streamon(int index)
236 {
237 int result;
238 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
239
240 if (!(devices[index].flags & V4L2_STREAMON)) {
241 result = devices[index].dev_ops->ioctl(
242 devices[index].dev_ops_priv,
243 devices[index].fd, VIDIOC_STREAMON, &type);
244 if (result) {
245 int saved_err = errno;
246
247 V4L2_PERROR("turning on stream");
248 errno = saved_err;
249 return result;
250 }
251 devices[index].flags |= V4L2_STREAMON;
252 devices[index].first_frame = V4L2_IGNORE_FIRST_FRAME_ERRORS;
253 }
254
255 return 0;
256 }
257
v4l2_streamoff(int index)258 static int v4l2_streamoff(int index)
259 {
260 int result;
261 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
262
263 if (devices[index].flags & V4L2_STREAMON) {
264 result = devices[index].dev_ops->ioctl(
265 devices[index].dev_ops_priv,
266 devices[index].fd, VIDIOC_STREAMOFF, &type);
267 if (result) {
268 int saved_err = errno;
269
270 V4L2_PERROR("turning off stream");
271 errno = saved_err;
272 return result;
273 }
274 devices[index].flags &= ~V4L2_STREAMON;
275
276 /* Stream off also dequeues all our buffers! */
277 devices[index].frame_queued = 0;
278 }
279
280 return 0;
281 }
282
v4l2_queue_read_buffer(int index,int buffer_index)283 static int v4l2_queue_read_buffer(int index, int buffer_index)
284 {
285 int result;
286 struct v4l2_buffer buf;
287
288 if (devices[index].frame_queued & (1 << buffer_index))
289 return 0;
290
291 memset(&buf, 0, sizeof(buf));
292 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
293 buf.memory = V4L2_MEMORY_MMAP;
294 buf.index = buffer_index;
295 result = devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
296 devices[index].fd, VIDIOC_QBUF, &buf);
297 if (result) {
298 int saved_err = errno;
299
300 V4L2_PERROR("queuing buf %d", buffer_index);
301 errno = saved_err;
302 return result;
303 }
304
305 devices[index].frame_queued |= 1 << buffer_index;
306 return 0;
307 }
308
v4l2_dequeue_and_convert(int index,struct v4l2_buffer * buf,unsigned char * dest,int dest_size)309 static int v4l2_dequeue_and_convert(int index, struct v4l2_buffer *buf,
310 unsigned char *dest, int dest_size)
311 {
312 const int max_tries = V4L2_IGNORE_FIRST_FRAME_ERRORS + 1;
313 int result, tries = max_tries, frame_info_gen;
314
315 /* Make sure we have the real v4l2 buffers mapped */
316 result = v4l2_map_buffers(index);
317 if (result)
318 return result;
319
320 do {
321 frame_info_gen = devices[index].frame_info_generation;
322 pthread_mutex_unlock(&devices[index].stream_lock);
323 result = devices[index].dev_ops->ioctl(
324 devices[index].dev_ops_priv,
325 devices[index].fd, VIDIOC_DQBUF, buf);
326 pthread_mutex_lock(&devices[index].stream_lock);
327 if (result) {
328 if (errno != EAGAIN) {
329 int saved_err = errno;
330
331 V4L2_PERROR("dequeuing buf");
332 errno = saved_err;
333 }
334 return result;
335 }
336
337 devices[index].frame_queued &= ~(1 << buf->index);
338
339 if (frame_info_gen != devices[index].frame_info_generation) {
340 errno = -EINVAL;
341 return -1;
342 }
343
344 result = v4lconvert_convert(devices[index].convert,
345 &devices[index].src_fmt, &devices[index].dest_fmt,
346 devices[index].frame_pointers[buf->index],
347 buf->bytesused, dest ? dest : (devices[index].convert_mmap_buf +
348 buf->index * devices[index].convert_mmap_frame_size),
349 dest_size);
350
351 if (devices[index].first_frame) {
352 /* Always treat convert errors as EAGAIN during the first few frames, as
353 some cams produce bad frames at the start of the stream
354 (hsync and vsync still syncing ??). */
355 if (result < 0)
356 errno = EAGAIN;
357 devices[index].first_frame--;
358 }
359
360 if (result < 0) {
361 int saved_err = errno;
362
363 if (errno == EAGAIN || errno == EPIPE)
364 V4L2_LOG("warning error while converting frame data: %s",
365 v4lconvert_get_error_message(devices[index].convert));
366 else
367 V4L2_LOG_ERR("converting / decoding frame data: %s",
368 v4lconvert_get_error_message(devices[index].convert));
369
370 /*
371 * If this is the last try, and the frame is short
372 * we will return the (short) buffer to the caller,
373 * so we must not re-queue it then!
374 */
375 if (!(tries == 1 && errno == EPIPE))
376 v4l2_queue_read_buffer(index, buf->index);
377 errno = saved_err;
378 }
379 tries--;
380 } while (result < 0 && (errno == EAGAIN || errno == EPIPE) && tries);
381
382 if (result < 0 && errno == EAGAIN) {
383 V4L2_LOG_ERR("got %d consecutive frame decode errors, last error: %s",
384 max_tries, v4lconvert_get_error_message(devices[index].convert));
385 errno = EIO;
386 }
387
388 if (result < 0 && errno == EPIPE) {
389 V4L2_LOG("got %d consecutive short frame errors, "
390 "returning short frame", max_tries);
391 result = devices[index].dest_fmt.fmt.pix.sizeimage;
392 errno = 0;
393 }
394
395 return result;
396 }
397
/* read() emulation path for devices without streaming support: read one raw
   frame from the driver into an internal scratch buffer and convert it into
   the app's destination format. Transient convert errors (EAGAIN / EPIPE)
   are retried up to max_tries times.
   Returns the converted byte count, or <= 0 with errno set on read error. */
static int v4l2_read_and_convert(int index, unsigned char *dest, int dest_size)
{
	const int max_tries = V4L2_IGNORE_FIRST_FRAME_ERRORS + 1;
	int result, buf_size, tries = max_tries;

	buf_size = devices[index].dest_fmt.fmt.pix.sizeimage;

	/* Grow the scratch buffer if the current format needs more space;
	   on realloc failure the old buffer stays valid and owned */
	if (devices[index].readbuf_size < buf_size) {
		unsigned char *new_buf;

		new_buf = realloc(devices[index].readbuf, buf_size);
		if (!new_buf)
			return -1;

		devices[index].readbuf = new_buf;
		devices[index].readbuf_size = buf_size;
	}

	do {
		result = devices[index].dev_ops->read(
				devices[index].dev_ops_priv,
				devices[index].fd, devices[index].readbuf,
				buf_size);
		if (result <= 0) {
			/* 0 (EOF) is passed through silently; only log real errors */
			if (result && errno != EAGAIN) {
				int saved_err = errno;

				V4L2_PERROR("reading");
				errno = saved_err;
			}
			return result;
		}

		result = v4lconvert_convert(devices[index].convert,
				&devices[index].src_fmt, &devices[index].dest_fmt,
				devices[index].readbuf, result, dest, dest_size);

		if (devices[index].first_frame) {
			/* Always treat convert errors as EAGAIN during the first few frames, as
			   some cams produce bad frames at the start of the stream
			   (hsync and vsync still syncing ??). */
			if (result < 0)
				errno = EAGAIN;
			devices[index].first_frame--;
		}

		if (result < 0) {
			int saved_err = errno;

			if (errno == EAGAIN || errno == EPIPE)
				V4L2_LOG("warning error while converting frame data: %s",
						v4lconvert_get_error_message(devices[index].convert));
			else
				V4L2_LOG_ERR("converting / decoding frame data: %s",
						v4lconvert_get_error_message(devices[index].convert));

			errno = saved_err;
		}
		tries--;
	} while (result < 0 && (errno == EAGAIN || errno == EPIPE) && tries);

	/* Too many decode failures in a row: report as I/O error */
	if (result < 0 && errno == EAGAIN) {
		V4L2_LOG_ERR("got %d consecutive frame decode errors, last error: %s",
				max_tries, v4lconvert_get_error_message(devices[index].convert));
		errno = EIO;
	}

	/* Persistent short frames: hand the short frame to the caller anyway */
	if (result < 0 && errno == EPIPE) {
		V4L2_LOG("got %d consecutive short frame errors, "
				"returning short frame", max_tries);
		result = devices[index].dest_fmt.fmt.pix.sizeimage;
		errno = 0;
	}

	return result;
}
474
v4l2_queue_read_buffers(int index)475 static int v4l2_queue_read_buffers(int index)
476 {
477 unsigned int i;
478 int last_error = EIO, queued = 0;
479
480 for (i = 0; i < devices[index].no_frames; i++) {
481 /* Don't queue unmapped buffers (should never happen) */
482 if (devices[index].frame_pointers[i] != MAP_FAILED) {
483 if (v4l2_queue_read_buffer(index, i)) {
484 last_error = errno;
485 continue;
486 }
487 queued++;
488 }
489 }
490
491 if (!queued) {
492 errno = last_error;
493 return -1;
494 }
495 return 0;
496 }
497
v4l2_activate_read_stream(int index)498 static int v4l2_activate_read_stream(int index)
499 {
500 int result;
501
502 if ((devices[index].flags & V4L2_STREAMON) || devices[index].frame_queued) {
503 errno = EBUSY;
504 return -1;
505 }
506
507 result = v4l2_request_read_buffers(index);
508 if (!result)
509 result = v4l2_map_buffers(index);
510 if (!result)
511 result = v4l2_queue_read_buffers(index);
512 if (result)
513 return result;
514
515 devices[index].flags |= V4L2_STREAM_CONTROLLED_BY_READ;
516
517 return v4l2_streamon(index);
518 }
519
v4l2_deactivate_read_stream(int index)520 static int v4l2_deactivate_read_stream(int index)
521 {
522 int result;
523
524 result = v4l2_streamoff(index);
525 if (result)
526 return result;
527
528 /* No need to dequeue our buffers, streamoff does that for us */
529
530 v4l2_unmap_buffers(index);
531
532 v4l2_unrequest_read_buffers(index);
533
534 devices[index].flags &= ~V4L2_STREAM_CONTROLLED_BY_READ;
535
536 return 0;
537 }
538
v4l2_needs_conversion(int index)539 static int v4l2_needs_conversion(int index)
540 {
541 if (devices[index].convert == NULL)
542 return 0;
543
544 return v4lconvert_needs_conversion(devices[index].convert,
545 &devices[index].src_fmt, &devices[index].dest_fmt);
546 }
547
v4l2_set_conversion_buf_params(int index,struct v4l2_buffer * buf)548 static void v4l2_set_conversion_buf_params(int index, struct v4l2_buffer *buf)
549 {
550 if (!v4l2_needs_conversion(index))
551 return;
552
553 /* This may happen if the ioctl failed */
554 if (buf->index >= devices[index].no_frames)
555 buf->index = 0;
556
557 buf->m.offset = V4L2_MMAP_OFFSET_MAGIC | buf->index;
558 buf->length = devices[index].convert_mmap_frame_size;
559 if (devices[index].frame_map_count[buf->index])
560 buf->flags |= V4L2_BUF_FLAG_MAPPED;
561 else
562 buf->flags &= ~V4L2_BUF_FLAG_MAPPED;
563 }
564
v4l2_buffers_mapped(int index)565 static int v4l2_buffers_mapped(int index)
566 {
567 unsigned int i;
568
569 if (!v4l2_needs_conversion(index)) {
570 /* Normal (no conversion) mode */
571 struct v4l2_buffer buf;
572
573 for (i = 0; i < devices[index].no_frames; i++) {
574 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
575 buf.memory = V4L2_MEMORY_MMAP;
576 buf.index = i;
577 buf.reserved = buf.reserved2 = 0;
578 if (devices[index].dev_ops->ioctl(
579 devices[index].dev_ops_priv,
580 devices[index].fd, VIDIOC_QUERYBUF,
581 &buf)) {
582 int saved_err = errno;
583
584 V4L2_PERROR("querying buffer %u", i);
585 errno = saved_err;
586 break;
587 }
588 if (buf.flags & V4L2_BUF_FLAG_MAPPED)
589 break;
590 }
591 } else {
592 /* Conversion mode */
593 for (i = 0; i < devices[index].no_frames; i++)
594 if (devices[index].frame_map_count[i])
595 break;
596 }
597
598 if (i != devices[index].no_frames)
599 V4L2_LOG("v4l2_buffers_mapped(): buffers still mapped\n");
600
601 return i != devices[index].no_frames;
602 }
603
v4l2_update_fps(int index,struct v4l2_streamparm * parm)604 static void v4l2_update_fps(int index, struct v4l2_streamparm *parm)
605 {
606 if ((devices[index].flags & V4L2_SUPPORTS_TIMEPERFRAME) &&
607 parm->parm.capture.timeperframe.numerator != 0) {
608 int fps = parm->parm.capture.timeperframe.denominator;
609 fps += parm->parm.capture.timeperframe.numerator - 1;
610 fps /= parm->parm.capture.timeperframe.numerator;
611 devices[index].fps = fps;
612 } else
613 devices[index].fps = 0;
614 }
615
/* open() replacement: open the file with the raw syscall wrapper (SYS_OPEN,
   never plain open() — see the LD_PRELOAD note at the top of this file) and
   then register the fd with libv4l2 via v4l2_fd_open().
   The optional third argument is the mode, consumed only when O_CREAT is set,
   exactly like open(2). Returns the fd, or -1 with errno set. */
int v4l2_open(const char *file, int oflag, ...)
{
	int fd;

	/* original open code */
	if (oflag & O_CREAT) {
		va_list ap;
		mode_t mode;

		va_start(ap, oflag);
		/* mode_t may be promoted when passed through "..." */
		mode = va_arg(ap, PROMOTED_MODE_T);

		fd = SYS_OPEN(file, oflag, mode);

		va_end(ap);
	} else {
		fd = SYS_OPEN(file, oflag, 0);
	}
	/* end of original open code */

	if (fd == -1)
		return fd;

	/* Registration failed: close the fd again, preserving errno */
	if (v4l2_fd_open(fd, 0) == -1) {
		int saved_err = errno;

		SYS_CLOSE(fd);
		errno = saved_err;
		return -1;
	}

	return fd;
}
649
/* Register an already-open fd with libv4l2: verify it is a v4l2 capture
   device, query its current format and frame-rate capabilities, create a
   libv4lconvert context (unless V4L2_DISABLE_CONVERSION is in v4l2_flags),
   and claim a slot in the devices table.
   Returns the fd on success, -1 with errno set on failure. Non-capture
   v4l2 devices are still registered (see the no_capture label) so the
   other v4l2_ wrappers pass their calls through. */
int v4l2_fd_open(int fd, int v4l2_flags)
{
	int i, index;
	char *lfname;
	struct v4l2_capability cap;
	struct v4l2_format fmt = { 0, };
	struct v4l2_streamparm parm = { 0, };
	struct v4lconvert_data *convert = NULL;
	void *plugin_library;
	void *dev_ops_priv;
	const struct libv4l_dev_ops *dev_ops;
	long page_size;

	/* Let a plugin take over the low-level device ops if one claims this fd */
	v4l2_plugin_init(fd, &plugin_library, &dev_ops_priv, &dev_ops);

	/* If no log file was set by the app, see if one was specified through the
	   environment */
	if (!v4l2_log_file) {
		lfname = getenv("LIBV4L2_LOG_FILENAME");
		if (lfname)
			v4l2_log_file = fopen(lfname, "w");
	}

	/* Get page_size (for mmap emulation) */
	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		int saved_err = errno;
		V4L2_LOG_ERR("unable to retrieve page size: %s\n",
				strerror(errno));
		v4l2_plugin_cleanup(plugin_library, dev_ops_priv, dev_ops);
		errno = saved_err;
		return -1;
	}

	/* check that this is a v4l2 device */
	if (dev_ops->ioctl(dev_ops_priv, fd, VIDIOC_QUERYCAP, &cap)) {
		int saved_err = errno;
		V4L2_LOG_ERR("getting capabilities: %s\n", strerror(errno));
		v4l2_plugin_cleanup(plugin_library, dev_ops_priv, dev_ops);
		errno = saved_err;
		return -1;
	}

	/* Prefer the per-node device_caps over the whole-device capabilities */
	if (cap.capabilities & V4L2_CAP_DEVICE_CAPS)
		cap.capabilities = cap.device_caps;
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) ||
			!(cap.capabilities & (V4L2_CAP_STREAMING | V4L2_CAP_READWRITE)))
		goto no_capture;

	/* Get current cam format */
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (dev_ops->ioctl(dev_ops_priv, fd, VIDIOC_G_FMT, &fmt)) {
		int saved_err = errno;
		V4L2_LOG_ERR("getting pixformat: %s\n", strerror(errno));
		v4l2_plugin_cleanup(plugin_library, dev_ops_priv, dev_ops);
		errno = saved_err;
		return -1;
	}

	/* Check for frame rate setting support; parm.type == 0 below means
	   G_PARM is unsupported */
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (dev_ops->ioctl(dev_ops_priv, fd, VIDIOC_G_PARM, &parm))
		parm.type = 0;

	/* init libv4lconvert */
	if (!(v4l2_flags & V4L2_DISABLE_CONVERSION)) {
		convert = v4lconvert_create_with_dev_ops(fd, dev_ops_priv, dev_ops);
		if (!convert) {
			int saved_err = errno;
			v4l2_plugin_cleanup(plugin_library, dev_ops_priv,
					dev_ops);
			errno = saved_err;
			return -1;
		}
	}

no_capture:
	/* So we have a v4l2 capture device, register it in our devices array */
	pthread_mutex_lock(&v4l2_open_mutex);
	for (index = 0; index < V4L2_MAX_DEVICES; index++) {
		if (devices[index].fd == -1) {
			devices[index].fd = fd;
			devices[index].plugin_library = plugin_library;
			devices[index].dev_ops_priv = dev_ops_priv;
			devices[index].dev_ops = dev_ops;
			break;
		}
	}
	pthread_mutex_unlock(&v4l2_open_mutex);

	if (index == V4L2_MAX_DEVICES) {
		V4L2_LOG_ERR("attempting to open more than %d video devices\n",
				V4L2_MAX_DEVICES);
		v4l2_plugin_cleanup(plugin_library, dev_ops_priv, dev_ops);
		errno = EBUSY;
		return -1;
	}

	/* Fill in the rest of the slot's state (the slot is already claimed,
	   so this can happen outside the open mutex) */
	devices[index].flags = v4l2_flags;
	if (cap.capabilities & V4L2_CAP_READWRITE)
		devices[index].flags |= V4L2_SUPPORTS_READ;
	if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
		devices[index].flags |= V4L2_USE_READ_FOR_READ;
		/* This device only supports read so the stream gets started by the
		   driver on the first read */
		devices[index].first_frame = V4L2_IGNORE_FIRST_FRAME_ERRORS;
	}
	if ((parm.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
			(parm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME))
		devices[index].flags |= V4L2_SUPPORTS_TIMEPERFRAME;
	devices[index].open_count = 1;
	devices[index].page_size = page_size;
	devices[index].src_fmt = fmt;
	devices[index].dest_fmt = fmt;
	v4l2_set_src_and_dest_format(index, &devices[index].src_fmt,
			&devices[index].dest_fmt);

	pthread_mutex_init(&devices[index].stream_lock, NULL);

	devices[index].no_frames = 0;
	devices[index].nreadbuffers = V4L2_DEFAULT_NREADBUFFERS;
	devices[index].convert = convert;
	devices[index].convert_mmap_buf = MAP_FAILED;
	devices[index].convert_mmap_buf_size = 0;
	for (i = 0; i < V4L2_MAX_NO_FRAMES; i++) {
		devices[index].frame_pointers[i] = MAP_FAILED;
		devices[index].frame_map_count[i] = 0;
	}
	devices[index].frame_queued = 0;
	devices[index].readbuf = NULL;
	devices[index].readbuf_size = 0;

	if (index >= devices_used)
		devices_used = index + 1;

	/* Note we always tell v4lconvert to optimize src fmt selection for
	   our default fps, the only exception is the app explicitly selecting
	   a frame rate using the S_PARM ioctl after a S_FMT */
	if (devices[index].convert)
		v4lconvert_set_fps(devices[index].convert, V4L2_DEFAULT_FPS);
	v4l2_update_fps(index, &parm);

	V4L2_LOG("open: %d\n", fd);

	return fd;
}
796
797 /* Is this an fd for which we are emulating v4l1 ? */
v4l2_get_index(int fd)798 static int v4l2_get_index(int fd)
799 {
800 int index;
801
802 /* We never handle fd -1 */
803 if (fd == -1)
804 return -1;
805
806 for (index = 0; index < devices_used; index++)
807 if (devices[index].fd == fd)
808 break;
809
810 if (index == devices_used)
811 return -1;
812
813 return index;
814 }
815
816
/* close() replacement: for unmanaged fds this is a plain SYS_CLOSE; for
   managed fds it drops one reference and, on the last reference, frees all
   per-device resources before closing the fd. Returns close()'s result. */
int v4l2_close(int fd)
{
	int index, result;

	index = v4l2_get_index(fd);
	if (index == -1)
		return SYS_CLOSE(fd);

	/* Abuse stream_lock to stop 2 closes from racing and trying to free
	   the resources twice */
	pthread_mutex_lock(&devices[index].stream_lock);
	devices[index].open_count--;
	result = devices[index].open_count != 0;
	pthread_mutex_unlock(&devices[index].stream_lock);

	/* Still referenced (v4l2_dup): nothing to tear down yet */
	if (result)
		return 0;

	v4l2_plugin_cleanup(devices[index].plugin_library,
			devices[index].dev_ops_priv,
			devices[index].dev_ops);

	/* Free resources */
	v4l2_unmap_buffers(index);
	if (devices[index].convert_mmap_buf != MAP_FAILED) {
		if (v4l2_buffers_mapped(index)) {
			/* App still has mappings into our conversion buffer;
			   leak it rather than pull memory out from under them */
			if (!devices[index].gone)
				V4L2_LOG_WARN("v4l2 mmap buffers still mapped on close()\n");
		} else {
			SYS_MUNMAP(devices[index].convert_mmap_buf,
					devices[index].convert_mmap_buf_size);
		}
		devices[index].convert_mmap_buf = MAP_FAILED;
		devices[index].convert_mmap_buf_size = 0;
	}
	v4lconvert_destroy(devices[index].convert);
	free(devices[index].readbuf);
	devices[index].readbuf = NULL;
	devices[index].readbuf_size = 0;

	/* Remove the fd from our list of managed fds before closing it, because as
	   soon as we've done the actual close, the fd maybe returned by an open() in
	   another thread and we don't want to intercept calls to this new fd. */
	devices[index].fd = -1;

	/* Since we've marked the fd as no longer used, and freed the resources,
	   redo the close in case it was interrupted */
	do {
		result = SYS_CLOSE(fd);
	} while (result == -1 && errno == EINTR);

	V4L2_LOG("close: %d\n", fd);

	return result;
}
872
/* dup() replacement: unmanaged fds get a real dup(2); managed fds just gain
   a reference and the SAME fd value is returned (so a later v4l2_close()
   balances it).
   NOTE(review): open_count is incremented here without taking stream_lock,
   while v4l2_close() decrements it under the lock — confirm callers never
   dup and close the same fd concurrently. */
int v4l2_dup(int fd)
{
	int index = v4l2_get_index(fd);

	if (index == -1)
		return syscall(SYS_dup, fd);

	devices[index].open_count++;

	return fd;
}
884
/* Verify it is safe to change buffer geometry (format / reqbufs): bumps the
   generation counter so in-flight dequeues invalidate themselves, unmaps our
   driver-buffer mappings, and fails with EBUSY when the app still holds
   mappings or is streaming on its own. On success any read()-emulation
   stream is torn down and the conversion mmap buffer is released. */
static int v4l2_check_buffer_change_ok(int index)
{
	devices[index].frame_info_generation++;
	v4l2_unmap_buffers(index);

	/* Check if the app itself still is using the stream */
	if (v4l2_buffers_mapped(index) ||
			(!(devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) &&
			 ((devices[index].flags & V4L2_STREAMON) ||
			  devices[index].frame_queued))) {
		V4L2_LOG("v4l2_check_buffer_change_ok(): stream busy\n");
		errno = EBUSY;
		return -1;
	}

	/* We may change from convert to non conversion mode and
	   v4l2_unrequest_read_buffers may change the no_frames, so free the
	   convert mmap buffer */
	SYS_MUNMAP(devices[index].convert_mmap_buf,
			devices[index].convert_mmap_buf_size);
	devices[index].convert_mmap_buf = MAP_FAILED;
	devices[index].convert_mmap_buf_size = 0;

	if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
		V4L2_LOG("deactivating read-stream for settings change\n");
		return v4l2_deactivate_read_stream(index);
	}

	return 0;
}
915
/* Two pix formats are "compatible" (no conversion needed) when width,
   height, pixelformat and field all match; returns 1 / 0. */
static int v4l2_pix_fmt_compat(struct v4l2_format *a, struct v4l2_format *b)
{
	const struct v4l2_pix_format *pa = &a->fmt.pix;
	const struct v4l2_pix_format *pb = &b->fmt.pix;

	return pa->width == pb->width &&
		pa->height == pb->height &&
		pa->pixelformat == pb->pixelformat &&
		pa->field == pb->field;
}
926
/* Two pix formats are "identical" when they are compatible AND agree on
   bytesperline and sizeimage; returns 1 / 0. */
static int v4l2_pix_fmt_identical(struct v4l2_format *a, struct v4l2_format *b)
{
	if (!v4l2_pix_fmt_compat(a, b))
		return 0;

	return a->fmt.pix.bytesperline == b->fmt.pix.bytesperline &&
		a->fmt.pix.sizeimage == b->fmt.pix.sizeimage;
}
936
/* Store the (driver) src format and the (app-visible) dest format for a
   device, normalizing dest_fmt on the way: align supported resolutions,
   copy through fields conversion never changes, fix up bytesperline /
   sizeimage, and recompute the page-aligned conversion frame size. */
static void v4l2_set_src_and_dest_format(int index,
		struct v4l2_format *src_fmt, struct v4l2_format *dest_fmt)
{
	/*
	 * When a user does a try_fmt with the current dest_fmt and the
	 * dest_fmt is a supported one we will align the resolution (see
	 * libv4lconvert_try_fmt). We do this here too, in case dest_fmt gets
	 * set without having gone through libv4lconvert_try_fmt, so that a
	 * try_fmt on the result of a get_fmt always returns the same result.
	 */
	if (v4lconvert_supported_dst_format(dest_fmt->fmt.pix.pixelformat)) {
		/* width to a multiple of 8, height to a multiple of 2 */
		dest_fmt->fmt.pix.width &= ~7;
		dest_fmt->fmt.pix.height &= ~1;
	}

	/* Sigh some drivers (pwc) do not properly reflect what one really gets
	   after a s_fmt in their try_fmt answer. So update dest format (which we
	   report as result from s_fmt / g_fmt to the app) with all info from the src
	   format not changed by conversion */
	dest_fmt->fmt.pix.field = src_fmt->fmt.pix.field;
	dest_fmt->fmt.pix.colorspace = src_fmt->fmt.pix.colorspace;
	dest_fmt->fmt.pix.xfer_func = src_fmt->fmt.pix.xfer_func;
	dest_fmt->fmt.pix.ycbcr_enc = src_fmt->fmt.pix.ycbcr_enc;
	dest_fmt->fmt.pix.quantization = src_fmt->fmt.pix.quantization;
	/* When we're not converting use bytesperline and imagesize from src_fmt */
	if (v4l2_pix_fmt_compat(src_fmt, dest_fmt)) {
		dest_fmt->fmt.pix.bytesperline = src_fmt->fmt.pix.bytesperline;
		dest_fmt->fmt.pix.sizeimage = src_fmt->fmt.pix.sizeimage;
	} else
		v4lconvert_fixup_fmt(dest_fmt);

	devices[index].src_fmt = *src_fmt;
	devices[index].dest_fmt = *dest_fmt;
	/* round up to full page size */
	devices[index].convert_mmap_frame_size =
		(((dest_fmt->fmt.pix.sizeimage + devices[index].page_size - 1)
		  / devices[index].page_size) * devices[index].page_size);
}
975
/* Handle VIDIOC_S_FMT for a capture format: ask libv4lconvert which src
   format to really set, verify no buffers are still in use, set the src
   format on the driver, and record src/dest formats. On driver failure
   dest_fmt is rewritten to the unchanged current dest format.
   Returns 0 on success, non-zero with errno set otherwise. */
static int v4l2_s_fmt(int index, struct v4l2_format *dest_fmt)
{
	struct v4l2_format src_fmt;
	struct v4l2_pix_format req_pix_fmt;
	int result;

	if (v4l2_log_file) {
		int pixfmt = dest_fmt->fmt.pix.pixelformat;

		/* pixelformat is a fourcc; log it as 4 chars */
		fprintf(v4l2_log_file, "VIDIOC_S_FMT app requesting: %c%c%c%c\n",
				pixfmt & 0xff,
				(pixfmt >> 8) & 0xff,
				(pixfmt >> 16) & 0xff,
				pixfmt >> 24);
	}

	result = v4lconvert_try_format(devices[index].convert,
			dest_fmt, &src_fmt);
	if (result) {
		int saved_err = errno;
		V4L2_LOG("S_FMT error trying format: %s\n", strerror(errno));
		errno = saved_err;
		return result;
	}

	if (src_fmt.fmt.pix.pixelformat != dest_fmt->fmt.pix.pixelformat &&
			v4l2_log_file) {
		int pixfmt = src_fmt.fmt.pix.pixelformat;

		fprintf(v4l2_log_file,
				"VIDIOC_S_FMT converting from: %c%c%c%c\n",
				pixfmt & 0xff, (pixfmt >> 8) & 0xff,
				(pixfmt >> 16) & 0xff, pixfmt >> 24);
	}

	/* Refuse the format change while buffers are mapped / queued */
	result = v4l2_check_buffer_change_ok(index);
	if (result)
		return result;

	/* Remember what we asked for, to detect driver deviations below */
	req_pix_fmt = src_fmt.fmt.pix;
	result = devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
			devices[index].fd,
			VIDIOC_S_FMT, &src_fmt);
	if (result) {
		int saved_err = errno;
		V4L2_PERROR("setting pixformat");
		/* Report to the app dest_fmt has not changed */
		*dest_fmt = devices[index].dest_fmt;
		errno = saved_err;
		return result;
	}

	/* See if we've gotten what try_fmt promised us
	   (this check should never fail) */
	if (src_fmt.fmt.pix.width != req_pix_fmt.width ||
			src_fmt.fmt.pix.height != req_pix_fmt.height ||
			src_fmt.fmt.pix.pixelformat != req_pix_fmt.pixelformat) {
		V4L2_LOG_ERR("set_fmt gave us a different result than try_fmt!\n");
		/* Not what we expected / wanted, disable conversion */
		*dest_fmt = src_fmt;
	}

	v4l2_set_src_and_dest_format(index, &src_fmt, dest_fmt);

	/* Re-read the frame rate: some drivers change it on a format change */
	if (devices[index].flags & V4L2_SUPPORTS_TIMEPERFRAME) {
		struct v4l2_streamparm parm = {
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		};
		if (devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
				devices[index].fd,
				VIDIOC_G_PARM, &parm))
			return 0;
		v4l2_update_fps(index, &parm);
	}

	return 0;
}
1053
/*
 * ioctl() replacement for libv4l2-registered fds.
 *
 * Non-registered fds are passed straight to the system ioctl. For
 * registered capture devices, capture-related requests are intercepted so
 * that format conversion by libv4lconvert stays transparent to the app;
 * everything else is forwarded to the device ops unchanged. Requests that
 * can change the stream state are serialized via the per-device
 * stream_lock.
 */
int v4l2_ioctl(int fd, unsigned long int request, ...)
{
	void *arg;
	va_list ap;
	int result, index, saved_err;
	int is_capture_request = 0, stream_needs_locking = 0;

	/* ioctl takes a single (pointer-sized) variadic argument */
	va_start(ap, request);
	arg = va_arg(ap, void *);
	va_end(ap);

	index = v4l2_get_index(fd);
	if (index == -1)
		return SYS_IOCTL(fd, request, arg);

	/* Apparently the kernel and / or glibc ignore the 32 most significant bits
	   when long = 64 bits, and some applications pass an int holding the req to
	   ioctl, causing it to get sign extended, depending upon this behavior */
	request = (unsigned int)request;

	if (devices[index].convert == NULL)
		goto no_capture_request;

	/* Is this a capture request and do we need to take the stream lock? */
	switch (request) {
	case VIDIOC_QUERYCAP:
	case VIDIOC_QUERYCTRL:
	case VIDIOC_G_CTRL:
	case VIDIOC_S_CTRL:
	case VIDIOC_G_EXT_CTRLS:
	case VIDIOC_TRY_EXT_CTRLS:
	case VIDIOC_S_EXT_CTRLS:
	case VIDIOC_ENUM_FRAMESIZES:
	case VIDIOC_ENUM_FRAMEINTERVALS:
		is_capture_request = 1;
		break;
	case VIDIOC_ENUM_FMT:
		if (((struct v4l2_fmtdesc *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE)
			is_capture_request = 1;
		break;
	case VIDIOC_TRY_FMT:
		if (((struct v4l2_format *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE)
			is_capture_request = 1;
		break;
	case VIDIOC_S_FMT:
	case VIDIOC_G_FMT:
		if (((struct v4l2_format *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_REQBUFS:
		if (((struct v4l2_requestbuffers *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
		if (((struct v4l2_buffer *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
		if (*((enum v4l2_buf_type *)arg) ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_S_PARM:
		if (((struct v4l2_streamparm *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			/* S_PARM only touches stream state when we adjust the
			   src fmt for a frame rate change */
			if (devices[index].flags & V4L2_SUPPORTS_TIMEPERFRAME)
				stream_needs_locking = 1;
		}
		break;
	case VIDIOC_S_STD:
	case VIDIOC_S_INPUT:
	case VIDIOC_S_DV_TIMINGS:
		/* These may change the device's native format behind our
		   back, see their handler below */
		is_capture_request = 1;
		stream_needs_locking = 1;
		break;
	}

	if (!is_capture_request) {
no_capture_request:
		/* Forward untouched, but keep the ioctl log consistent */
		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, request, arg);
		saved_err = errno;
		v4l2_log_ioctl(request, arg, result);
		errno = saved_err;
		return result;
	}


	if (stream_needs_locking) {
		pthread_mutex_lock(&devices[index].stream_lock);
		/* If this is the first stream-related ioctl, and we should only allow
		   libv4lconvert supported destination formats (so that it can do flipping,
		   processing, etc.) and the current destination format is not supported,
		   try setting the format to RGB24 (which is a supported dest. format). */
		if (!(devices[index].flags & V4L2_STREAM_TOUCHED) &&
				v4lconvert_supported_dst_fmt_only(devices[index].convert) &&
				!v4lconvert_supported_dst_format(
					devices[index].dest_fmt.fmt.pix.pixelformat)) {
			struct v4l2_format fmt = devices[index].dest_fmt;

			V4L2_LOG("Setting pixelformat to RGB24 (supported_dst_fmt_only)");
			fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
			v4l2_s_fmt(index, &fmt);
			V4L2_LOG("Done setting pixelformat (supported_dst_fmt_only)");
		}
		devices[index].flags |= V4L2_STREAM_TOUCHED;
	}

	switch (request) {
	/* Control requests go through libv4lconvert so it can overlay its
	   software controls (e.g. flipping / whitebalance) */
	case VIDIOC_QUERYCTRL:
		result = v4lconvert_vidioc_queryctrl(devices[index].convert, arg);
		break;

	case VIDIOC_G_CTRL:
		result = v4lconvert_vidioc_g_ctrl(devices[index].convert, arg);
		break;

	case VIDIOC_S_CTRL:
		result = v4lconvert_vidioc_s_ctrl(devices[index].convert, arg);
		break;

	case VIDIOC_G_EXT_CTRLS:
		result = v4lconvert_vidioc_g_ext_ctrls(devices[index].convert, arg);
		break;

	case VIDIOC_TRY_EXT_CTRLS:
		result = v4lconvert_vidioc_try_ext_ctrls(devices[index].convert, arg);
		break;

	case VIDIOC_S_EXT_CTRLS:
		result = v4lconvert_vidioc_s_ext_ctrls(devices[index].convert, arg);
		break;

	case VIDIOC_QUERYCAP: {
		struct v4l2_capability *cap = arg;

		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_QUERYCAP, cap);
		if (result == 0) {
			/* We always support read() as we fake it using mmap mode */
			cap->capabilities |= V4L2_CAP_READWRITE;
			cap->device_caps |= V4L2_CAP_READWRITE;
		}
		break;
	}

	/* Format enumeration is answered by libv4lconvert so converted
	   formats show up as if natively supported */
	case VIDIOC_ENUM_FMT:
		result = v4lconvert_enum_fmt(devices[index].convert, arg);
		break;

	case VIDIOC_ENUM_FRAMESIZES:
		result = v4lconvert_enum_framesizes(devices[index].convert, arg);
		break;

	case VIDIOC_ENUM_FRAMEINTERVALS:
		result = v4lconvert_enum_frameintervals(devices[index].convert, arg);
		if (result)
			V4L2_LOG("ENUM_FRAMEINTERVALS Error: %s",
					v4lconvert_get_error_message(devices[index].convert));
		break;

	case VIDIOC_TRY_FMT:
		result = v4lconvert_try_format(devices[index].convert,
				arg, NULL);
		break;

	case VIDIOC_S_FMT:
		result = v4l2_s_fmt(index, arg);
		break;

	case VIDIOC_G_FMT: {
		struct v4l2_format *fmt = arg;

		/* Always report the (possibly converted) dest format the app
		   set, never the driver-side format */
		*fmt = devices[index].dest_fmt;
		result = 0;
		break;
	}

	case VIDIOC_S_STD:
	case VIDIOC_S_INPUT:
	case VIDIOC_S_DV_TIMINGS: {
		struct v4l2_format src_fmt = { 0 };
		unsigned int orig_dest_pixelformat =
			devices[index].dest_fmt.fmt.pix.pixelformat;

		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, request, arg);
		if (result)
			break;

		/* These ioctls may have changed the device's fmt */
		src_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_G_FMT, &src_fmt);
		if (result) {
			V4L2_PERROR("getting pixformat after %s",
					v4l2_ioctls[_IOC_NR(request)]);
			result = 0; /* The original command did succeed */
			break;
		}

		/* Unchanged fmt: just refresh the bookkeeping and be done */
		if (v4l2_pix_fmt_compat(&devices[index].src_fmt, &src_fmt)) {
			v4l2_set_src_and_dest_format(index, &src_fmt,
					&devices[index].dest_fmt);
			break;
		}

		/* The fmt has been changed, remember the new format ... */
		devices[index].src_fmt  = src_fmt;
		devices[index].dest_fmt = src_fmt;
		v4l2_set_src_and_dest_format(index, &devices[index].src_fmt,
				&devices[index].dest_fmt);
		/* and try to restore the last set destination pixelformat. */
		src_fmt.fmt.pix.pixelformat = orig_dest_pixelformat;
		result = v4l2_s_fmt(index, &src_fmt);
		if (result) {
			V4L2_LOG_WARN("restoring destination pixelformat after %s failed\n",
					v4l2_ioctls[_IOC_NR(request)]);
			result = 0; /* The original command did succeed */
		}

		break;
	}

	case VIDIOC_REQBUFS: {
		struct v4l2_requestbuffers *req = arg;

		/* IMPROVEME (maybe?) add support for userptr's? */
		if (req->memory != V4L2_MEMORY_MMAP) {
			errno = EINVAL;
			result = -1;
			break;
		}

		result = v4l2_check_buffer_change_ok(index);
		if (result)
			break;

		/* No more buffers than we can manage please */
		if (req->count > V4L2_MAX_NO_FRAMES)
			req->count = V4L2_MAX_NO_FRAMES;

		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_REQBUFS, req);
		if (result < 0)
			break;
		result = 0; /* some drivers return the number of buffers on success */

		devices[index].no_frames = MIN(req->count, V4L2_MAX_NO_FRAMES);
		devices[index].flags &= ~V4L2_BUFFERS_REQUESTED_BY_READ;
		break;
	}

	case VIDIOC_QUERYBUF: {
		struct v4l2_buffer *buf = arg;

		/* If emulated read() was streaming, stop it first */
		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		/* Do a real query even when converting to let the driver fill in
		   things like buf->field */
		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_QUERYBUF, buf);

		/* Overwrite length/offset with our conversion-buffer values */
		v4l2_set_conversion_buf_params(index, buf);
		break;
	}

	case VIDIOC_QBUF: {
		struct v4l2_buffer *buf = arg;

		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		/* With some drivers the buffers must be mapped before queuing */
		if (v4l2_needs_conversion(index)) {
			result = v4l2_map_buffers(index);
			if (result)
				break;
		}

		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_QBUF, arg);

		v4l2_set_conversion_buf_params(index, buf);
		break;
	}

	case VIDIOC_DQBUF: {
		struct v4l2_buffer *buf = arg;

		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		if (!v4l2_needs_conversion(index)) {
			/* Drop the lock while (possibly) blocking in DQBUF so
			   other threads can still issue stream ioctls */
			pthread_mutex_unlock(&devices[index].stream_lock);
			result = devices[index].dev_ops->ioctl(
					devices[index].dev_ops_priv,
					fd, VIDIOC_DQBUF, buf);
			pthread_mutex_lock(&devices[index].stream_lock);
			if (result) {
				saved_err = errno;
				V4L2_PERROR("dequeuing buf");
				errno = saved_err;
			}
			break;
		}

		/* An application can do a DQBUF before mmap-ing in the buffer,
		   but we need the buffer _now_ to write our converted data
		   to it! */
		result = v4l2_ensure_convert_mmap_buf(index);
		if (result)
			break;

		result = v4l2_dequeue_and_convert(index, buf, 0,
				devices[index].convert_mmap_frame_size);
		if (result >= 0) {
			/* on success result is the converted frame size */
			buf->bytesused = result;
			result = 0;
		}

		v4l2_set_conversion_buf_params(index, buf);
		break;
	}

	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		if (request == VIDIOC_STREAMON)
			result = v4l2_streamon(index);
		else
			result = v4l2_streamoff(index);
		break;

	case VIDIOC_S_PARM: {
		struct v4l2_streamparm *parm = arg;

		/* See if libv4lconvert wishes to use a different src_fmt
		   for the new frame rate and set that first */
		if ((devices[index].flags & V4L2_SUPPORTS_TIMEPERFRAME) &&
				parm->parm.capture.timeperframe.numerator != 0) {
			/* fps = denominator / numerator, rounded up */
			int fps = parm->parm.capture.timeperframe.denominator;
			fps += parm->parm.capture.timeperframe.numerator - 1;
			fps /= parm->parm.capture.timeperframe.numerator;
			v4l2_adjust_src_fmt_to_fps(index, fps);
		}

		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, VIDIOC_S_PARM, parm);
		if (result)
			break;

		v4l2_update_fps(index, parm);
		break;
	}

	default:
		result = devices[index].dev_ops->ioctl(
				devices[index].dev_ops_priv,
				fd, request, arg);
		break;
	}

	if (stream_needs_locking)
		pthread_mutex_unlock(&devices[index].stream_lock);

	/* v4l2_log_ioctl may clobber errno, preserve it for the caller */
	saved_err = errno;
	v4l2_log_ioctl(request, arg, result);
	errno = saved_err;

	return result;
}
1466
/*
 * Called on a frame rate change (S_PARM): ask libv4lconvert if a different
 * driver-side (src) format would better suit the new fps and, if so, switch
 * to it — but only when the dest format the app sees stays the same.
 * Best-effort: on any failure the original formats are left/restored in
 * place and nothing is reported to the caller.
 */
static void v4l2_adjust_src_fmt_to_fps(int index, int fps)
{
	struct v4l2_pix_format req_pix_fmt;
	struct v4l2_format src_fmt;
	struct v4l2_format dest_fmt = devices[index].dest_fmt;
	struct v4l2_format orig_src_fmt = devices[index].src_fmt;
	struct v4l2_format orig_dest_fmt = devices[index].dest_fmt;
	int r;

	/* Nothing to do if the fps is not actually changing */
	if (fps == devices[index].fps)
		return;

	/* Cannot change the format while buffers are in use */
	if (v4l2_check_buffer_change_ok(index))
		return;

	/* Temporarily tell libv4lconvert the target fps so its try_format
	   picks a src fmt suited for it, then restore the default */
	v4lconvert_set_fps(devices[index].convert, fps);
	r = v4lconvert_try_format(devices[index].convert, &dest_fmt, &src_fmt);
	v4lconvert_set_fps(devices[index].convert, V4L2_DEFAULT_FPS);
	if (r)
		return;

	/* Bail if the src pixelformat would not change (no point), or the
	   resulting dest fmt no longer matches what the app has set */
	if (orig_src_fmt.fmt.pix.pixelformat == src_fmt.fmt.pix.pixelformat ||
			!v4l2_pix_fmt_compat(&orig_dest_fmt, &dest_fmt))
		return;

	req_pix_fmt = src_fmt.fmt.pix;
	if (devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
			devices[index].fd, VIDIOC_S_FMT, &src_fmt))
		return;

	v4l2_set_src_and_dest_format(index, &src_fmt, &dest_fmt);

	/* Check we've gotten what try_fmt promised us and that the
	   new dest fmt matches the original, if this is true we're done. */
	if (src_fmt.fmt.pix.width == req_pix_fmt.width &&
			src_fmt.fmt.pix.height == req_pix_fmt.height &&
			src_fmt.fmt.pix.pixelformat == req_pix_fmt.pixelformat &&
			v4l2_pix_fmt_identical(&orig_dest_fmt, &dest_fmt)) {
		if (v4l2_log_file) {
			int pixfmt = src_fmt.fmt.pix.pixelformat;
			fprintf(v4l2_log_file,
					"new src fmt for fps change: %c%c%c%c\n",
					pixfmt & 0xff, (pixfmt >> 8) & 0xff,
					(pixfmt >> 16) & 0xff, pixfmt >> 24);
		}
		return;
	}

	/* Not identical!! */
	V4L2_LOG_WARN("dest fmt changed after adjusting src fmt for fps "
			"change, restoring original src fmt");
	src_fmt = orig_src_fmt;
	dest_fmt = orig_dest_fmt;
	req_pix_fmt = src_fmt.fmt.pix;
	if (devices[index].dev_ops->ioctl(devices[index].dev_ops_priv,
			devices[index].fd, VIDIOC_S_FMT, &src_fmt)) {
		V4L2_PERROR("restoring src fmt");
		return;
	}
	v4l2_set_src_and_dest_format(index, &src_fmt, &dest_fmt);
	if (src_fmt.fmt.pix.width != req_pix_fmt.width ||
			src_fmt.fmt.pix.height != req_pix_fmt.height ||
			src_fmt.fmt.pix.pixelformat != req_pix_fmt.pixelformat ||
			!v4l2_pix_fmt_identical(&orig_dest_fmt, &dest_fmt))
		V4L2_LOG_ERR("dest fmt different after restoring src fmt");
}
1533
/*
 * read() replacement: for non-registered fds falls through to the system
 * read. For registered capture devices it emulates read() — preferably by
 * streaming (mmap) under the hood and converting each dequeued frame into
 * dest, falling back to driver read() when mmap streaming cannot be
 * activated. Returns bytes read, or -1 with errno set.
 */
ssize_t v4l2_read(int fd, void *dest, size_t n)
{
	ssize_t result;
	int saved_errno;
	int index = v4l2_get_index(fd);

	if (index == -1)
		return SYS_READ(fd, dest, n);

	if (!devices[index].dev_ops->read) {
		errno = EINVAL;
		return -1;
	}

	pthread_mutex_lock(&devices[index].stream_lock);

	/* When not converting and the device supports read(), let the kernel handle
	   it */
	if (devices[index].convert == NULL ||
			((devices[index].flags & V4L2_SUPPORTS_READ) &&
			 !v4l2_needs_conversion(index))) {
		result = devices[index].dev_ops->read(
				devices[index].dev_ops_priv,
				fd, dest, n);
		goto leave;
	}

	/* Since we need to do conversion try to use mmap (streaming) mode under
	   the hood as that saves a memcpy for each frame read.

	   Note sometimes this will fail as some drivers (at least gspca) do not allow
	   switching from read mode to mmap mode and they assume read() mode if a
	   select or poll() is done before any buffers are requested. So using mmap
	   mode under the hood will fail if a select() or poll() is done before the
	   first emulated read() call. */
	if (!(devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) &&
			!(devices[index].flags & V4L2_USE_READ_FOR_READ)) {
		result = v4l2_activate_read_stream(index);
		if (result) {
			/* Activating mmap mode failed, use read() instead */
			devices[index].flags |= V4L2_USE_READ_FOR_READ;
			/* The read call done by v4l2_read_and_convert will start the stream */
			devices[index].first_frame = V4L2_IGNORE_FIRST_FRAME_ERRORS;
		}
	}

	if (devices[index].flags & V4L2_USE_READ_FOR_READ) {
		result = v4l2_read_and_convert(index, dest, n);
	} else {
		/* mmap mode: dequeue a frame, convert into dest, requeue */
		struct v4l2_buffer buf;

		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		result = v4l2_dequeue_and_convert(index, &buf, dest, n);

		if (result >= 0)
			v4l2_queue_read_buffer(index, buf.index);
	}

leave:
	/* Preserve errno across the unlock for the caller */
	saved_errno = errno;
	pthread_mutex_unlock(&devices[index].stream_lock);
	errno = saved_errno;

	return result;
}
1600
v4l2_write(int fd,const void * buffer,size_t n)1601 ssize_t v4l2_write(int fd, const void *buffer, size_t n)
1602 {
1603 int index = v4l2_get_index(fd);
1604
1605 if (index == -1)
1606 return SYS_WRITE(fd, buffer, n);
1607
1608 if (!devices[index].dev_ops->write) {
1609 errno = EINVAL;
1610 return -1;
1611 }
1612
1613 return devices[index].dev_ops->write(
1614 devices[index].dev_ops_priv, fd, buffer, n);
1615 }
1616
/*
 * mmap() replacement. When the app mmap-s one of our fake conversion
 * buffers — recognized by the magic offset we handed out via QUERYBUF and
 * by the matching length — return a pointer into our own conversion buffer
 * instead of mapping driver memory. Anything else is passed through to the
 * real mmap (allowing mmap-based non-capture use of the fd).
 */
void *v4l2_mmap(void *start, size_t length, int prot, int flags, int fd,
		int64_t offset)
{
	int index;
	unsigned int buffer_index;
	void *result;

	index = v4l2_get_index(fd);
	/* NOTE: index == -1 must be tested first, it short-circuits the
	   devices[index] accesses below */
	if (index == -1 ||
			/* Check if the mmap data matches our answer to QUERY_BUF. If it doesn't,
			   let the kernel handle it (to allow for mmap-based non capture use) */
			start || length != devices[index].convert_mmap_frame_size ||
			((unsigned int)offset & ~0xFFu) != V4L2_MMAP_OFFSET_MAGIC) {
		if (index != -1)
			V4L2_LOG("Passing mmap(%p, %d, ..., %x, through to the driver\n",
					start, (int)length, (int)offset);

		/* presumably rejects offsets not aligned for mmap2-style
		   drivers — TODO confirm against MMAP2_PAGE_SHIFT definition */
		if (offset & ((1 << MMAP2_PAGE_SHIFT) - 1)) {
			errno = EINVAL;
			return MAP_FAILED;
		}

		return (void *)SYS_MMAP(start, length, prot, flags, fd, offset);
	}

	pthread_mutex_lock(&devices[index].stream_lock);

	/* The low byte of the magic offset encodes the buffer index */
	buffer_index = offset & 0xff;
	if (buffer_index >= devices[index].no_frames ||
			/* Got magic offset and not converting ?? */
			!v4l2_needs_conversion(index)) {
		errno = EINVAL;
		result = MAP_FAILED;
		goto leave;
	}

	if (v4l2_ensure_convert_mmap_buf(index)) {
		errno = EINVAL;
		result = MAP_FAILED;
		goto leave;
	}

	/* Track mappings so munmap knows when the buffer is fully released */
	devices[index].frame_map_count[buffer_index]++;

	result = devices[index].convert_mmap_buf +
		buffer_index * devices[index].convert_mmap_frame_size;

	V4L2_LOG("Fake (conversion) mmap buf %u, seen by app at: %p\n",
			buffer_index, result);

leave:
	pthread_mutex_unlock(&devices[index].stream_lock);

	return result;
}
1672
/*
 * munmap() replacement. If the (start, length) pair points into one of our
 * fake conversion buffers, just drop its map refcount; otherwise fall
 * through to the real munmap. The ownership test is done once without the
 * lock to find the device, then re-done under the stream_lock since the
 * buffers may have changed in between.
 */
int v4l2_munmap(void *_start, size_t length)
{
	int index;
	unsigned int buffer_index;
	unsigned char *start = _start;

	/* Is this memory ours? */
	if (start != MAP_FAILED) {
		/* Unlocked scan: find a device whose conversion buffer range
		   contains start at a frame boundary */
		for (index = 0; index < devices_used; index++)
			if (devices[index].fd != -1 &&
					devices[index].convert_mmap_buf != MAP_FAILED &&
					length == devices[index].convert_mmap_frame_size &&
					start >= devices[index].convert_mmap_buf &&
					(start - devices[index].convert_mmap_buf) % length == 0)
				break;

		if (index != devices_used) {
			int unmapped = 0;

			pthread_mutex_lock(&devices[index].stream_lock);

			buffer_index = (start - devices[index].convert_mmap_buf) / length;

			/* Re-do our checks now that we have the lock, things may have changed */
			if (devices[index].convert_mmap_buf != MAP_FAILED &&
					length == devices[index].convert_mmap_frame_size &&
					start >= devices[index].convert_mmap_buf &&
					(start - devices[index].convert_mmap_buf) % length == 0 &&
					buffer_index < devices[index].no_frames) {
				if (devices[index].frame_map_count[buffer_index] > 0)
					devices[index].frame_map_count[buffer_index]--;
				unmapped = 1;
			}

			pthread_mutex_unlock(&devices[index].stream_lock);

			if (unmapped) {
				V4L2_LOG("v4l2 fake buffer munmap %p, %d\n", start, (int)length);
				return 0;
			}
		}
	}

	V4L2_LOG("v4l2 unknown munmap %p, %d\n", start, (int)length);

	return SYS_MUNMAP(_start, length);
}
1720
1721 /* Misc utility functions */
v4l2_set_control(int fd,int cid,int value)1722 int v4l2_set_control(int fd, int cid, int value)
1723 {
1724 struct v4l2_queryctrl qctrl = { .id = cid };
1725 struct v4l2_control ctrl = { .id = cid };
1726 int index, result;
1727
1728 index = v4l2_get_index(fd);
1729 if (index == -1 || devices[index].convert == NULL) {
1730 V4L2_LOG_ERR("v4l2_set_control called with invalid fd: %d\n", fd);
1731 errno = EBADF;
1732 return -1;
1733 }
1734
1735 result = v4lconvert_vidioc_queryctrl(devices[index].convert, &qctrl);
1736 if (result)
1737 return result;
1738
1739 if (!(qctrl.flags & V4L2_CTRL_FLAG_DISABLED) &&
1740 !(qctrl.flags & V4L2_CTRL_FLAG_GRABBED)) {
1741 if (qctrl.type == V4L2_CTRL_TYPE_BOOLEAN)
1742 ctrl.value = value ? 1 : 0;
1743 else
1744 ctrl.value = ((long long) value * (qctrl.maximum - qctrl.minimum) + 32767) / 65535 +
1745 qctrl.minimum;
1746
1747 result = v4lconvert_vidioc_s_ctrl(devices[index].convert, &ctrl);
1748 }
1749
1750 return result;
1751 }
1752
v4l2_get_control(int fd,int cid)1753 int v4l2_get_control(int fd, int cid)
1754 {
1755 struct v4l2_queryctrl qctrl = { .id = cid };
1756 struct v4l2_control ctrl = { .id = cid };
1757 int index = v4l2_get_index(fd);
1758
1759 if (index == -1 || devices[index].convert == NULL) {
1760 V4L2_LOG_ERR("v4l2_set_control called with invalid fd: %d\n", fd);
1761 errno = EBADF;
1762 return -1;
1763 }
1764
1765 if (v4lconvert_vidioc_queryctrl(devices[index].convert, &qctrl))
1766 return -1;
1767
1768 if (qctrl.flags & V4L2_CTRL_FLAG_DISABLED) {
1769 errno = EINVAL;
1770 return -1;
1771 }
1772
1773 if (v4lconvert_vidioc_g_ctrl(devices[index].convert, &ctrl))
1774 return -1;
1775
1776 return (((long long) ctrl.value - qctrl.minimum) * 65535 +
1777 (qctrl.maximum - qctrl.minimum) / 2) /
1778 (qctrl.maximum - qctrl.minimum);
1779 }
1780