• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 Collabora Ltd.
3  *     Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Library General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Library General Public License for more details.
14  *
15  * You should have received a copy of the GNU Library General Public
16  * License along with this library; if not, write to the
17  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18  * Boston, MA 02110-1301, USA.
19  *
20  */
21 
22 #include "config.h"
23 
24 #ifndef _GNU_SOURCE
25 # define _GNU_SOURCE            /* O_CLOEXEC */
26 #endif
27 
28 #include "ext/videodev2.h"
29 
30 #include "gstv4l2object.h"
31 #include "gstv4l2allocator.h"
32 
33 #include <gst/allocators/gstdmabuf.h>
34 
35 #include <fcntl.h>
36 #include <string.h>
37 #include <sys/stat.h>
38 #include <sys/types.h>
39 #include <sys/mman.h>
40 #include <unistd.h>
41 
/* GstMemory type name used by gst_is_v4l2_memory() to recognize memory
 * created by this allocator */
#define GST_V4L2_MEMORY_TYPE "V4l2Memory"

#define gst_v4l2_allocator_parent_class parent_class
G_DEFINE_TYPE (GstV4l2Allocator, gst_v4l2_allocator, GST_TYPE_ALLOCATOR);

GST_DEBUG_CATEGORY_STATIC (v4l2allocator_debug);
#define GST_CAT_DEFAULT v4l2allocator_debug

/* Helpers to manipulate the queued/done state flags of a struct v4l2_buffer */
#define UNSET_QUEUED(buffer) \
    ((buffer).flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))

#define SET_QUEUED(buffer) ((buffer).flags |= V4L2_BUF_FLAG_QUEUED)

#define IS_QUEUED(buffer) \
    ((buffer).flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))

/* Signals emitted by the allocator; GROUP_RELEASED fires when a complete
 * memory group is returned to the free queue */
enum
{
  GROUP_RELEASED,
  LAST_SIGNAL
};

static guint gst_v4l2_allocator_signals[LAST_SIGNAL] = { 0 };

static void gst_v4l2_allocator_release (GstV4l2Allocator * allocator,
    GstV4l2Memory * mem);
68 
69 static const gchar *
memory_type_to_str(guint32 memory)70 memory_type_to_str (guint32 memory)
71 {
72   switch (memory) {
73     case V4L2_MEMORY_MMAP:
74       return "mmap";
75     case V4L2_MEMORY_USERPTR:
76       return "userptr";
77     case V4L2_MEMORY_DMABUF:
78       return "dmabuf";
79     default:
80       return "unknown";
81   }
82 }
83 
84 /*************************************/
85 /* GstV4lMemory implementation */
86 /*************************************/
87 
88 static gpointer
_v4l2mem_map(GstV4l2Memory * mem,gsize maxsize,GstMapFlags flags)89 _v4l2mem_map (GstV4l2Memory * mem, gsize maxsize, GstMapFlags flags)
90 {
91   gpointer data = NULL;
92 
93   switch (mem->group->buffer.memory) {
94     case V4L2_MEMORY_MMAP:
95     case V4L2_MEMORY_USERPTR:
96       data = mem->data;
97       break;
98     case V4L2_MEMORY_DMABUF:
99       /* v4l2 dmabuf memory are not shared with downstream */
100       g_assert_not_reached ();
101       break;
102     default:
103       GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
104       break;
105   }
106   return data;
107 }
108 
109 static gboolean
_v4l2mem_unmap(GstV4l2Memory * mem)110 _v4l2mem_unmap (GstV4l2Memory * mem)
111 {
112   gboolean ret = FALSE;
113 
114   switch (mem->group->buffer.memory) {
115     case V4L2_MEMORY_MMAP:
116     case V4L2_MEMORY_USERPTR:
117       ret = TRUE;
118       break;
119     case V4L2_MEMORY_DMABUF:
120       /* v4l2 dmabuf memory are not share with downstream */
121       g_assert_not_reached ();
122       break;
123     default:
124       GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
125       break;
126   }
127   return ret;
128 }
129 
/* GstMiniObject dispose hook, installed only on top-level (unparented)
 * memories by _v4l2mem_new().  When the group still owns this plane slot,
 * the memory is "revived": re-referenced into the group, handed back via
 * gst_v4l2_allocator_release(), and FALSE is returned so it is not freed.
 * When the slot was already cleared (see gst_v4l2_memory_group_free()),
 * TRUE lets destruction proceed. */
static gboolean
_v4l2mem_dispose (GstV4l2Memory * mem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) mem->mem.allocator;
  GstV4l2MemoryGroup *group = mem->group;
  gboolean ret;

  if (group->mem[mem->plane]) {
    /* We may have a dmabuf, replace it with returned original memory */
    group->mem[mem->plane] = gst_memory_ref ((GstMemory *) mem);
    gst_v4l2_allocator_release (allocator, mem);
    ret = FALSE;
  } else {
    /* NOTE(review): presumably this extra ref balances the allocator
     * unref performed in gst_v4l2_allocator_release() on the revival
     * path — confirm against the GstMemory free path */
    gst_object_ref (allocator);
    ret = TRUE;
  }

  return ret;
}
149 
150 static inline GstV4l2Memory *
_v4l2mem_new(GstMemoryFlags flags,GstAllocator * allocator,GstMemory * parent,gsize maxsize,gsize align,gsize offset,gsize size,gint plane,gpointer data,int dmafd,GstV4l2MemoryGroup * group)151 _v4l2mem_new (GstMemoryFlags flags, GstAllocator * allocator,
152     GstMemory * parent, gsize maxsize, gsize align, gsize offset, gsize size,
153     gint plane, gpointer data, int dmafd, GstV4l2MemoryGroup * group)
154 {
155   GstV4l2Memory *mem;
156 
157   mem = g_slice_new0 (GstV4l2Memory);
158   gst_memory_init (GST_MEMORY_CAST (mem),
159       flags, allocator, parent, maxsize, align, offset, size);
160 
161   if (parent == NULL)
162     mem->mem.mini_object.dispose =
163         (GstMiniObjectDisposeFunction) _v4l2mem_dispose;
164 
165   mem->plane = plane;
166   mem->data = data;
167   mem->dmafd = dmafd;
168   mem->group = group;
169 
170   return mem;
171 }
172 
173 static GstV4l2Memory *
_v4l2mem_share(GstV4l2Memory * mem,gssize offset,gsize size)174 _v4l2mem_share (GstV4l2Memory * mem, gssize offset, gsize size)
175 {
176   GstV4l2Memory *sub;
177   GstMemory *parent;
178 
179   /* find the real parent */
180   if ((parent = mem->mem.parent) == NULL)
181     parent = (GstMemory *) mem;
182 
183   if (size == -1)
184     size = mem->mem.size - offset;
185 
186   /* the shared memory is always readonly */
187   sub = _v4l2mem_new (GST_MINI_OBJECT_FLAGS (parent) |
188       GST_MINI_OBJECT_FLAG_LOCK_READONLY, mem->mem.allocator, parent,
189       mem->mem.maxsize, mem->mem.align, offset, size, mem->plane, mem->data,
190       -1, mem->group);
191 
192   return sub;
193 }
194 
195 static gboolean
_v4l2mem_is_span(GstV4l2Memory * mem1,GstV4l2Memory * mem2,gsize * offset)196 _v4l2mem_is_span (GstV4l2Memory * mem1, GstV4l2Memory * mem2, gsize * offset)
197 {
198   if (offset)
199     *offset = mem1->mem.offset - mem1->mem.parent->offset;
200 
201   /* and memory is contiguous */
202   return mem1->mem.offset + mem1->mem.size == mem2->mem.offset;
203 }
204 
/**
 * gst_is_v4l2_memory:
 * @mem: a #GstMemory
 *
 * Returns: %TRUE if @mem was allocated by a #GstV4l2Allocator
 */
gboolean
gst_is_v4l2_memory (GstMemory * mem)
{
  return gst_memory_is_type (mem, GST_V4L2_MEMORY_TYPE);
}
210 
211 GQuark
gst_v4l2_memory_quark(void)212 gst_v4l2_memory_quark (void)
213 {
214   static GQuark quark = 0;
215 
216   if (quark == 0)
217     quark = g_quark_from_string ("GstV4l2Memory");
218 
219   return quark;
220 }
221 
222 
223 /*************************************/
224 /* GstV4l2MemoryGroup implementation */
225 /*************************************/
226 
227 static void
gst_v4l2_memory_group_free(GstV4l2MemoryGroup * group)228 gst_v4l2_memory_group_free (GstV4l2MemoryGroup * group)
229 {
230   gint i;
231 
232   for (i = 0; i < group->n_mem; i++) {
233     GstMemory *mem = group->mem[i];
234     group->mem[i] = NULL;
235     if (mem)
236       gst_memory_unref (mem);
237   }
238 
239   g_slice_free (GstV4l2MemoryGroup, group);
240 }
241 
/* Query driver buffer @index with VIDIOC_QUERYBUF and build a
 * GstV4l2MemoryGroup describing its planes.  For non-planar buffers the
 * buffer information is mirrored into planes[0] so later code can treat
 * both APIs uniformly.  Returns NULL on ioctl failure or when the driver
 * reports sizes inconsistent with the negotiated format. */
static GstV4l2MemoryGroup *
gst_v4l2_memory_group_new (GstV4l2Allocator * allocator, guint32 index)
{
  GstV4l2Object *obj = allocator->obj;
  guint32 memory = allocator->memory;
  struct v4l2_format *format = &obj->format;
  GstV4l2MemoryGroup *group;
  gsize img_size, buf_size;

  group = g_slice_new0 (GstV4l2MemoryGroup);

  group->buffer.type = format->type;
  group->buffer.index = index;
  group->buffer.memory = memory;

  if (V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
    /* the kernel fills group->planes through buffer.m.planes */
    group->n_mem = group->buffer.length = format->fmt.pix_mp.num_planes;
    group->buffer.m.planes = group->planes;
  } else {
    group->n_mem = 1;
  }

  if (obj->ioctl (obj->video_fd, VIDIOC_QUERYBUF, &group->buffer) < 0)
    goto querybuf_failed;

  if (group->buffer.index != index) {
    GST_ERROR_OBJECT (allocator, "Buffer index returned by VIDIOC_QUERYBUF "
        "didn't match, this indicate the presence of a bug in your driver or "
        "libv4l2");
    g_slice_free (GstV4l2MemoryGroup, group);
    return NULL;
  }

  /* Check that the provided size matches the negotiated format.  A failure
   * here usually means a driver or libv4l bug. */
  if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
    gint i;

    for (i = 0; i < group->n_mem; i++) {
      img_size = obj->format.fmt.pix_mp.plane_fmt[i].sizeimage;
      buf_size = group->planes[i].length;
      if (buf_size < img_size)
        goto buffer_too_short;
    }
  } else {
    img_size = obj->format.fmt.pix.sizeimage;
    buf_size = group->buffer.length;
    if (buf_size < img_size)
      goto buffer_too_short;
  }

  /* We save non planar buffer information into the multi-planar plane array
   * to avoid duplicating the code later */
  if (!V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
    group->planes[0].bytesused = group->buffer.bytesused;
    group->planes[0].length = group->buffer.length;
    group->planes[0].data_offset = 0;
    /* the m unions of v4l2_buffer and v4l2_plane overlap for what we use */
    g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
    memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
  }

  GST_LOG_OBJECT (allocator, "Got %s buffer", memory_type_to_str (memory));
  GST_LOG_OBJECT (allocator, "  index:     %u", group->buffer.index);
  GST_LOG_OBJECT (allocator, "  type:      %d", group->buffer.type);
  GST_LOG_OBJECT (allocator, "  flags:     %08x", group->buffer.flags);
  GST_LOG_OBJECT (allocator, "  field:     %d", group->buffer.field);
  GST_LOG_OBJECT (allocator, "  memory:    %d", group->buffer.memory);
  GST_LOG_OBJECT (allocator, "  planes:    %d", group->n_mem);

#ifndef GST_DISABLE_GST_DEBUG
  if (memory == V4L2_MEMORY_MMAP) {
    gint i;
    for (i = 0; i < group->n_mem; i++) {
      GST_LOG_OBJECT (allocator,
          "  [%u] bytesused: %u, length: %u, offset: %u", i,
          group->planes[i].bytesused, group->planes[i].length,
          group->planes[i].data_offset);
      GST_LOG_OBJECT (allocator, "  [%u] MMAP offset:  %u", i,
          group->planes[i].m.mem_offset);
    }
  }
#endif

  return group;

querybuf_failed:
  {
    GST_ERROR ("error querying buffer %d: %s", index, g_strerror (errno));
    goto failed;
  }
buffer_too_short:
  {
    GST_ERROR ("buffer size %" G_GSIZE_FORMAT
        " is smaller then negotiated size %" G_GSIZE_FORMAT
        ", this is usually the result of a bug in the v4l2 driver or libv4l.",
        buf_size, img_size);
    goto failed;
  }
failed:
  gst_v4l2_memory_group_free (group);
  return NULL;
}
344 
345 
346 /*************************************/
347 /* GstV4lAllocator implementation    */
348 /*************************************/
349 
/* Return one plane (GstV4l2Memory) of a group to the allocator.  Once the
 * last plane of the group comes back, the whole group is pushed on the
 * free queue and the "group-released" signal is emitted. */
static void
gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
{
  GstV4l2MemoryGroup *group = mem->group;

  GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
      mem->plane, group->buffer.index);

  switch (allocator->memory) {
    case V4L2_MEMORY_DMABUF:
      /* DMABUF import: drop the imported fd, the group no longer needs it */
      close (mem->dmafd);
      mem->dmafd = -1;
      break;
    case V4L2_MEMORY_USERPTR:
      /* USERPTR: forget the user-provided pointer */
      mem->data = NULL;
      break;
    default:
      break;
  }

  /* When all memory are back, put the group back in the free queue */
  if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
    GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
    gst_atomic_queue_push (allocator->free_queue, group);
    g_signal_emit (allocator, gst_v4l2_allocator_signals[GROUP_RELEASED], 0);
  }

  /* Keep last, allocator may be freed after this call */
  g_object_unref (allocator);
}
380 
/* GstAllocatorClass::free implementation.  Releases the backing storage of
 * a plane: munmap() for MMAP memory and close() of the dmabuf fd when one
 * is present.  Shared (parented) sub-memories own no storage, so only the
 * slice itself is freed for them. */
static void
gst_v4l2_allocator_free (GstAllocator * gallocator, GstMemory * gmem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) gallocator;
  GstV4l2Object *obj = allocator->obj;
  GstV4l2Memory *mem = (GstV4l2Memory *) gmem;
  GstV4l2MemoryGroup *group = mem->group;

  /* Only free unparented memory */
  if (mem->mem.parent == NULL) {
    GST_LOG_OBJECT (allocator, "freeing plane %i of buffer %u",
        mem->plane, group->buffer.index);

    if (allocator->memory == V4L2_MEMORY_MMAP) {
      if (mem->data)
        obj->munmap (mem->data, group->planes[mem->plane].length);
    }

    /* This apply for both mmap with expbuf, and dmabuf imported memory */
    if (mem->dmafd >= 0)
      close (mem->dmafd);
  }

  g_slice_free (GstV4l2Memory, mem);
}
406 
407 static void
gst_v4l2_allocator_dispose(GObject * obj)408 gst_v4l2_allocator_dispose (GObject * obj)
409 {
410   GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
411   gint i;
412 
413   GST_LOG_OBJECT (obj, "called");
414 
415   for (i = 0; i < allocator->count; i++) {
416     GstV4l2MemoryGroup *group = allocator->groups[i];
417     allocator->groups[i] = NULL;
418     if (group)
419       gst_v4l2_memory_group_free (group);
420   }
421 
422   G_OBJECT_CLASS (parent_class)->dispose (obj);
423 }
424 
425 static void
gst_v4l2_allocator_finalize(GObject * obj)426 gst_v4l2_allocator_finalize (GObject * obj)
427 {
428   GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
429 
430   GST_LOG_OBJECT (obj, "called");
431 
432   gst_atomic_queue_unref (allocator->free_queue);
433   gst_object_unref (allocator->obj->element);
434 
435   G_OBJECT_CLASS (parent_class)->finalize (obj);
436 }
437 
438 static void
gst_v4l2_allocator_class_init(GstV4l2AllocatorClass * klass)439 gst_v4l2_allocator_class_init (GstV4l2AllocatorClass * klass)
440 {
441   GObjectClass *object_class;
442   GstAllocatorClass *allocator_class;
443 
444   allocator_class = (GstAllocatorClass *) klass;
445   object_class = (GObjectClass *) klass;
446 
447   allocator_class->alloc = NULL;
448   allocator_class->free = gst_v4l2_allocator_free;
449 
450   object_class->dispose = gst_v4l2_allocator_dispose;
451   object_class->finalize = gst_v4l2_allocator_finalize;
452 
453   gst_v4l2_allocator_signals[GROUP_RELEASED] = g_signal_new ("group-released",
454       G_TYPE_FROM_CLASS (object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL,
455       G_TYPE_NONE, 0);
456 
457   GST_DEBUG_CATEGORY_INIT (v4l2allocator_debug, "v4l2allocator", 0,
458       "V4L2 Allocator");
459 }
460 
461 static void
gst_v4l2_allocator_init(GstV4l2Allocator * allocator)462 gst_v4l2_allocator_init (GstV4l2Allocator * allocator)
463 {
464   GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);
465 
466   alloc->mem_type = GST_V4L2_MEMORY_TYPE;
467   alloc->mem_map = (GstMemoryMapFunction) _v4l2mem_map;
468   alloc->mem_unmap = (GstMemoryUnmapFunction) _v4l2mem_unmap;
469   alloc->mem_share = (GstMemoryShareFunction) _v4l2mem_share;
470   alloc->mem_is_span = (GstMemoryIsSpanFunction) _v4l2mem_is_span;
471   /* Use the default, fallback copy function */
472 
473   allocator->free_queue = gst_atomic_queue_new (VIDEO_MAX_FRAME);
474 
475   GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
476 }
477 
/* Convenience wrapper mapping one memory type onto its allocator flags */
#define GST_V4L2_ALLOCATOR_PROBE(obj,type) \
    gst_v4l2_allocator_probe ((obj), V4L2_MEMORY_ ## type, \
        GST_V4L2_ALLOCATOR_FLAG_ ## type ## _REQBUFS, \
        GST_V4L2_ALLOCATOR_FLAG_ ## type ## _CREATE_BUFS)
/* Probe which allocation ioctls the driver supports for @memory by issuing
 * zero-count VIDIOC_REQBUFS / VIDIOC_CREATE_BUFS calls (no buffers are
 * actually allocated).  Returns a bitmask of @breq_flag / @bcreate_flag,
 * plus the orphaned-bufs flag when the driver reports that capability. */
static guint32
gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
    guint32 breq_flag, guint32 bcreate_flag)
{
  GstV4l2Object *obj = allocator->obj;
  struct v4l2_requestbuffers breq = { 0 };
  guint32 flags = 0;

  breq.type = obj->type;
  breq.count = 0;
  breq.memory = memory;

  if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) == 0) {
    struct v4l2_create_buffers bcreate = { 0 };

    flags |= breq_flag;

    bcreate.memory = memory;
    bcreate.format = obj->format;

    if ((obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) == 0))
      flags |= bcreate_flag;
  }

  /* breq.capabilities is zero-initialized, so this check is safe even when
   * REQBUFS failed or the kernel does not fill in capabilities */
  if (breq.capabilities & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS)
    flags |= GST_V4L2_ALLOCATOR_FLAG_SUPPORTS_ORPHANED_BUFS;

  return flags;
}
511 
/* Allocate one additional driver buffer with VIDIOC_CREATE_BUFS and wrap
 * it in a new memory group.  Requires an active, non-orphaned allocator
 * with CREATE_BUFS support.  Returns NULL on failure; holds the object
 * lock for the whole operation. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  GstV4l2Object *obj = allocator->obj;
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  if (!g_atomic_int_get (&allocator->active))
    goto done;

  if (GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator))
    goto orphaned_bug;

  bcreate.memory = allocator->memory;
  bcreate.format = obj->format;
  bcreate.count = 1;

  if (!allocator->can_allocate)
    goto done;

  if (obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* the driver reports the index of the newly created buffer */
  if (allocator->groups[bcreate.index] != NULL)
    goto create_bufs_bug;

  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

orphaned_bug:
  {
    GST_ERROR_OBJECT (allocator, "allocator was orphaned, "
        "not creating new buffers");
    goto done;
  }
create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
create_bufs_bug:
  {
    GST_ERROR_OBJECT (allocator, "created buffer has already used buffer "
        "index %i, this means there is an bug in your driver or libv4l2",
        bcreate.index);
    goto done;
  }
}
571 
572 static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc(GstV4l2Allocator * allocator)573 gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
574 {
575   GstV4l2MemoryGroup *group;
576 
577   if (!g_atomic_int_get (&allocator->active))
578     return NULL;
579 
580   group = gst_atomic_queue_pop (allocator->free_queue);
581 
582   if (group == NULL) {
583     if (allocator->can_allocate) {
584       group = gst_v4l2_allocator_create_buf (allocator);
585 
586       /* Don't hammer on CREATE_BUFS */
587       if (group == NULL)
588         allocator->can_allocate = FALSE;
589     }
590   }
591 
592   return group;
593 }
594 
595 static void
gst_v4l2_allocator_reset_size(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group)596 gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
597     GstV4l2MemoryGroup * group)
598 {
599   gint i;
600   for (i = 0; i < group->n_mem; i++) {
601     group->mem[i]->maxsize = group->planes[i].length;
602     group->mem[i]->offset = 0;
603     group->mem[i]->size = group->planes[i].length;
604   }
605 }
606 
607 static void
_cleanup_failed_alloc(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group)608 _cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
609 {
610   if (group->mems_allocated > 0) {
611     gint i;
612     /* If one or more mmap worked, we need to unref the memory, otherwise
613      * they will keep a ref on the allocator and leak it. This will put back
614      * the group into the free_queue */
615     for (i = 0; i < group->n_mem; i++)
616       gst_memory_unref (group->mem[i]);
617   } else {
618     /* Otherwise, group has to be on free queue for _stop() to work */
619     gst_atomic_queue_push (allocator->free_queue, group);
620   }
621 }
622 
623 
624 
/**
 * gst_v4l2_allocator_new:
 * @parent: the object owning the allocator, used to derive its name
 * @v4l2object: the #GstV4l2Object describing the device
 *
 * Create an allocator bound to @v4l2object and probe which memory types
 * (MMAP, USERPTR, DMABUF) the driver supports.  A ref is taken on the
 * object's element so @v4l2object stays valid for the allocator's life
 * (released in finalize).
 *
 * Returns: (transfer full): a new #GstV4l2Allocator
 */
GstV4l2Allocator *
gst_v4l2_allocator_new (GstObject * parent, GstV4l2Object * v4l2object)
{
  GstV4l2Allocator *allocator;
  guint32 flags = 0;
  gchar *name, *parent_name;

  parent_name = gst_object_get_name (parent);
  name = g_strconcat (parent_name, ":allocator", NULL);
  g_free (parent_name);

  allocator = g_object_new (GST_TYPE_V4L2_ALLOCATOR, "name", name, NULL);
  gst_object_ref_sink (allocator);
  g_free (name);

  /* Save everything */
  allocator->obj = v4l2object;

  /* Keep a ref on the element so obj does not disappear */
  gst_object_ref (allocator->obj->element);

  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, MMAP);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, USERPTR);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, DMABUF);


  if (flags == 0) {
    /* Drivers not ported from videobuf to videobuf2 don't allow freeing
     * buffers using REQBUFS(0). This is a workaround to still support these
     * drivers, which are known to have MMAP support. */
    GST_WARNING_OBJECT (allocator, "Could not probe supported memory type, "
        "assuming MMAP is supported, this is expected for older drivers not "
        " yet ported to videobuf2 framework");
    flags = GST_V4L2_ALLOCATOR_FLAG_MMAP_REQBUFS;
  }

  GST_OBJECT_FLAG_SET (allocator, flags);

  return allocator;
}
665 
/**
 * gst_v4l2_allocator_start:
 * @allocator: the allocator
 * @count: number of buffers to request (must be non-zero)
 * @memory: the V4L2 memory type to use
 *
 * Request @count buffers from the driver with VIDIOC_REQBUFS and create
 * one memory group per allocated buffer, pushing them all on the free
 * queue.  The driver may grant fewer buffers than requested.
 *
 * Returns: the number of buffers actually allocated, or 0 on failure
 */
guint
gst_v4l2_allocator_start (GstV4l2Allocator * allocator, guint32 count,
    guint32 memory)
{
  GstV4l2Object *obj = allocator->obj;
  struct v4l2_requestbuffers breq = { count, obj->type, memory };
  gboolean can_allocate;
  gint i;

  g_return_val_if_fail (count != 0, 0);

  GST_OBJECT_LOCK (allocator);

  if (g_atomic_int_get (&allocator->active))
    goto already_active;

  if (GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator))
    goto orphaned;

  if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
    goto reqbufs_failed;

  if (breq.count < 1)
    goto out_of_memory;

  /* Whether additional buffers can later be created with CREATE_BUFS is
   * determined by the capabilities probed at construction time */
  switch (memory) {
    case V4L2_MEMORY_MMAP:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, MMAP);
      break;
    case V4L2_MEMORY_USERPTR:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, USERPTR);
      break;
    case V4L2_MEMORY_DMABUF:
      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, DMABUF);
      break;
    default:
      can_allocate = FALSE;
      break;
  }

  GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
      breq.count, memory_type_to_str (memory), count);

  allocator->can_allocate = can_allocate;
  allocator->count = breq.count;
  allocator->memory = memory;

  /* Create memory groups */
  for (i = 0; i < allocator->count; i++) {
    allocator->groups[i] = gst_v4l2_memory_group_new (allocator, i);
    if (allocator->groups[i] == NULL)
      goto error;

    gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
  }

  g_atomic_int_set (&allocator->active, TRUE);

done:
  GST_OBJECT_UNLOCK (allocator);
  return breq.count;

already_active:
  {
    GST_ERROR_OBJECT (allocator, "allocator already active");
    goto error;
  }
orphaned:
  {
    GST_ERROR_OBJECT (allocator, "allocator was orphaned");
    goto error;
  }
reqbufs_failed:
  {
    GST_ERROR_OBJECT (allocator,
        "error requesting %d buffers: %s", count, g_strerror (errno));
    goto error;
  }
out_of_memory:
  {
    GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
    goto error;
  }
error:
  {
    /* groups created so far stay in allocator->groups and are freed in
     * gst_v4l2_allocator_dispose() */
    breq.count = 0;
    goto done;
  }
}
755 
756 GstV4l2Return
gst_v4l2_allocator_stop(GstV4l2Allocator * allocator)757 gst_v4l2_allocator_stop (GstV4l2Allocator * allocator)
758 {
759   GstV4l2Object *obj = allocator->obj;
760   struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
761   gint i = 0;
762   GstV4l2Return ret = GST_V4L2_OK;
763 
764   GST_DEBUG_OBJECT (allocator, "stop allocator");
765 
766   GST_OBJECT_LOCK (allocator);
767 
768   if (!g_atomic_int_get (&allocator->active))
769     goto done;
770 
771   if (gst_atomic_queue_length (allocator->free_queue) != allocator->count) {
772     GST_DEBUG_OBJECT (allocator, "allocator is still in use");
773     ret = GST_V4L2_BUSY;
774     goto done;
775   }
776 
777   while (gst_atomic_queue_pop (allocator->free_queue)) {
778     /* nothing */
779   };
780 
781   for (i = 0; i < allocator->count; i++) {
782     GstV4l2MemoryGroup *group = allocator->groups[i];
783     allocator->groups[i] = NULL;
784     if (group)
785       gst_v4l2_memory_group_free (group);
786   }
787 
788   if (!GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator)) {
789     /* Not all drivers support rebufs(0), so warn only */
790     if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
791       GST_WARNING_OBJECT (allocator,
792           "error releasing buffers buffers: %s", g_strerror (errno));
793   }
794 
795   allocator->count = 0;
796 
797   g_atomic_int_set (&allocator->active, FALSE);
798 
799 done:
800   GST_OBJECT_UNLOCK (allocator);
801   return ret;
802 }
803 
804 gboolean
gst_v4l2_allocator_orphan(GstV4l2Allocator * allocator)805 gst_v4l2_allocator_orphan (GstV4l2Allocator * allocator)
806 {
807   GstV4l2Object *obj = allocator->obj;
808   struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
809 
810   if (!GST_V4L2_ALLOCATOR_CAN_ORPHAN_BUFS (allocator))
811     return FALSE;
812 
813   GST_OBJECT_FLAG_SET (allocator, GST_V4L2_ALLOCATOR_FLAG_ORPHANED);
814 
815   if (!g_atomic_int_get (&allocator->active))
816     return TRUE;
817 
818   if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0) {
819     GST_ERROR_OBJECT (allocator,
820         "error orphaning buffers buffers: %s", g_strerror (errno));
821     return FALSE;
822   }
823 
824   return TRUE;
825 }
826 
/**
 * gst_v4l2_allocator_alloc_mmap:
 * @allocator: an allocator started in MMAP mode
 *
 * Get a free memory group and mmap() each of its planes into user space,
 * wrapping the mappings as #GstV4l2Memory.  Planes already carrying a
 * memory (revived groups) are reused as-is.
 *
 * Returns: a memory group, or NULL if none is free or mmap failed
 */
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{
  GstV4l2Object *obj = allocator->obj;
  GstV4l2MemoryGroup *group;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

  group = gst_v4l2_allocator_alloc (allocator);

  if (group == NULL)
    return NULL;

  for (i = 0; i < group->n_mem; i++) {
    if (group->mem[i] == NULL) {
      gpointer data;
      data = obj->mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
          MAP_SHARED, obj->video_fd, group->planes[i].m.mem_offset);

      if (data == MAP_FAILED)
        goto mmap_failed;

      GST_LOG_OBJECT (allocator,
          "mmap buffer length %d, data offset %d, plane %d",
          group->planes[i].length, group->planes[i].data_offset, i);

      group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
          NULL, group->planes[i].length, 0, 0, group->planes[i].length, i, data,
          -1, group);
    } else {
      /* Take back the allocator reference */
      gst_object_ref (allocator);
    }

    group->mems_allocated++;
  }

  /* Ensure group size. Unlike GST, v4l2 has size (bytesused) initially set
   * to 0. As length might be bigger than the expected size exposed in the
   * format, we simply set bytesused initially and reset it here for
   * simplicity */
  gst_v4l2_allocator_reset_size (allocator, group);

  return group;

mmap_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
        g_strerror (errno));
    _cleanup_failed_alloc (allocator, group);
    return NULL;
  }
}
881 
/**
 * gst_v4l2_allocator_alloc_dmabuf:
 * @allocator: an allocator started in MMAP mode
 * @dmabuf_allocator: a #GstAllocator producing dmabuf-backed memory
 *
 * Get a free memory group, export each plane as a DMABUF fd with
 * VIDIOC_EXPBUF and wrap the fds with @dmabuf_allocator.  The original
 * #GstV4l2Memory is kept alive as qdata on the dmabuf memory so it comes
 * back to this allocator when the dmabuf memory is destroyed.
 *
 * Returns: a memory group with dmabuf memories, or NULL on failure
 */
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
    GstAllocator * dmabuf_allocator)
{
  GstV4l2Object *obj = allocator->obj;
  GstV4l2MemoryGroup *group;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

  group = gst_v4l2_allocator_alloc (allocator);

  if (group == NULL)
    return NULL;

  for (i = 0; i < group->n_mem; i++) {
    GstV4l2Memory *mem;
    GstMemory *dma_mem;

    if (group->mem[i] == NULL) {
      struct v4l2_exportbuffer expbuf = { 0 };

      expbuf.type = obj->type;
      expbuf.index = group->buffer.index;
      expbuf.plane = i;
      expbuf.flags = O_CLOEXEC | O_RDWR;

      if (obj->ioctl (obj->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
        goto expbuf_failed;

      GST_LOG_OBJECT (allocator, "exported DMABUF as fd %i plane %d",
          expbuf.fd, i);

      group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
          NULL, group->planes[i].length, 0, group->planes[i].data_offset,
          group->planes[i].length - group->planes[i].data_offset, i, NULL,
          expbuf.fd, group);
    } else {
      /* Take back the allocator reference */
      gst_object_ref (allocator);
    }

    group->mems_allocated++;

    g_assert (gst_is_v4l2_memory (group->mem[i]));
    mem = (GstV4l2Memory *) group->mem[i];

    /* DONT_CLOSE: the fd is owned by the v4l2 memory, which closes it in
     * gst_v4l2_allocator_free() */
    dma_mem = gst_fd_allocator_alloc (dmabuf_allocator, mem->dmafd,
        group->planes[i].length, GST_FD_MEMORY_FLAG_DONT_CLOSE);
    /* hide the plane's data_offset from downstream */
    gst_memory_resize (dma_mem, group->planes[i].data_offset,
        group->planes[i].length - group->planes[i].data_offset);

    /* the v4l2 memory is released (unref'd) when the dmabuf wrapper dies */
    gst_mini_object_set_qdata (GST_MINI_OBJECT (dma_mem),
        GST_V4L2_MEMORY_QUARK, mem, (GDestroyNotify) gst_memory_unref);

    group->mem[i] = dma_mem;
  }

  gst_v4l2_allocator_reset_size (allocator, group);

  return group;

expbuf_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to export DMABUF: %s",
        g_strerror (errno));
    goto cleanup;
  }
cleanup:
  {
    _cleanup_failed_alloc (allocator, group);
    return NULL;
  }
}
956 
/* Reset every plane of a DMABUF-import group to an empty state: no fd,
 * zero sizes.  The actual fd/size are filled in later when a dmabuf is
 * imported into the group. */
static void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Object *obj = allocator->obj;
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {

    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "[%i] clearing DMABUF import, fd %i plane %d",
        group->buffer.index, mem->dmafd, i);

    /* Update memory */
    mem->mem.maxsize = 0;
    mem->mem.offset = 0;
    mem->mem.size = 0;
    mem->dmafd = -1;

    /* Update v4l2 structure */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* non-planar API keeps its info directly in the buffer, not in planes */
  if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
993 
994 GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabufin(GstV4l2Allocator * allocator)995 gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
996 {
997   GstV4l2MemoryGroup *group;
998   gint i;
999 
1000   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);
1001 
1002   group = gst_v4l2_allocator_alloc (allocator);
1003 
1004   if (group == NULL)
1005     return NULL;
1006 
1007   GST_LOG_OBJECT (allocator, "allocating empty DMABUF import group");
1008 
1009   for (i = 0; i < group->n_mem; i++) {
1010     if (group->mem[i] == NULL) {
1011       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
1012           NULL, 0, 0, 0, 0, i, NULL, -1, group);
1013     } else {
1014       /* Take back the allocator reference */
1015       gst_object_ref (allocator);
1016     }
1017 
1018     group->mems_allocated++;
1019   }
1020 
1021   gst_v4l2_allocator_clear_dmabufin (allocator, group);
1022 
1023   return group;
1024 }
1025 
1026 static void
gst_v4l2_allocator_clear_userptr(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group)1027 gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
1028     GstV4l2MemoryGroup * group)
1029 {
1030   GstV4l2Object *obj = allocator->obj;
1031   GstV4l2Memory *mem;
1032   gint i;
1033 
1034   g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);
1035 
1036   for (i = 0; i < group->n_mem; i++) {
1037     mem = (GstV4l2Memory *) group->mem[i];
1038 
1039     GST_LOG_OBJECT (allocator, "[%i] clearing USERPTR %p plane %d size %"
1040         G_GSIZE_FORMAT, group->buffer.index, mem->data, i, mem->mem.size);
1041 
1042     mem->mem.maxsize = 0;
1043     mem->mem.size = 0;
1044     mem->data = NULL;
1045 
1046     group->planes[i].length = 0;
1047     group->planes[i].bytesused = 0;
1048     group->planes[i].m.userptr = 0;
1049   }
1050 
1051   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1052     group->buffer.bytesused = 0;
1053     group->buffer.length = 0;
1054     group->buffer.m.userptr = 0;
1055   }
1056 }
1057 
1058 GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_userptr(GstV4l2Allocator * allocator)1059 gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
1060 {
1061   GstV4l2MemoryGroup *group;
1062   gint i;
1063 
1064   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);
1065 
1066   group = gst_v4l2_allocator_alloc (allocator);
1067 
1068   if (group == NULL)
1069     return NULL;
1070 
1071   GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");
1072 
1073   for (i = 0; i < group->n_mem; i++) {
1074 
1075     if (group->mem[i] == NULL) {
1076       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
1077           NULL, 0, 0, 0, 0, i, NULL, -1, group);
1078     } else {
1079       /* Take back the allocator reference */
1080       gst_object_ref (allocator);
1081     }
1082 
1083     group->mems_allocated++;
1084   }
1085 
1086   gst_v4l2_allocator_clear_userptr (allocator, group);
1087 
1088   return group;
1089 }
1090 
1091 gboolean
gst_v4l2_allocator_import_dmabuf(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group,gint n_mem,GstMemory ** dma_mem)1092 gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
1093     GstV4l2MemoryGroup * group, gint n_mem, GstMemory ** dma_mem)
1094 {
1095   GstV4l2Object *obj = allocator->obj;
1096   GstV4l2Memory *mem;
1097   gint i;
1098 
1099   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);
1100 
1101   if (group->n_mem != n_mem)
1102     goto n_mem_missmatch;
1103 
1104   for (i = 0; i < group->n_mem; i++) {
1105     gint dmafd;
1106     gsize size, offset, maxsize;
1107 
1108     if (!gst_is_dmabuf_memory (dma_mem[i]))
1109       goto not_dmabuf;
1110 
1111     size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);
1112 
1113     dmafd = gst_dmabuf_memory_get_fd (dma_mem[i]);
1114 
1115     GST_LOG_OBJECT (allocator, "[%i] imported DMABUF as fd %i plane %d",
1116         group->buffer.index, dmafd, i);
1117 
1118     mem = (GstV4l2Memory *) group->mem[i];
1119 
1120     /* Update memory */
1121     mem->mem.maxsize = maxsize;
1122     mem->mem.offset = offset;
1123     mem->mem.size = size;
1124     mem->dmafd = dmafd;
1125 
1126     /* Update v4l2 structure */
1127     group->planes[i].length = maxsize;
1128     group->planes[i].bytesused = size + offset;
1129     group->planes[i].m.fd = dmafd;
1130     group->planes[i].data_offset = offset;
1131   }
1132 
1133   /* Copy into buffer structure if not using planes */
1134   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1135     group->buffer.bytesused = group->planes[0].bytesused;
1136     group->buffer.length = group->planes[0].length;
1137     group->buffer.m.fd = group->planes[0].m.userptr;
1138 
1139     /* FIXME Check if data_offset > 0 and fail for non-multi-planar */
1140     g_assert (group->planes[0].data_offset == 0);
1141   } else {
1142     group->buffer.length = group->n_mem;
1143   }
1144 
1145   return TRUE;
1146 
1147 n_mem_missmatch:
1148   {
1149     GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
1150         group->n_mem);
1151     return FALSE;
1152   }
1153 not_dmabuf:
1154   {
1155     GST_ERROR_OBJECT (allocator, "Memory %i is not of DMABUF", i);
1156     return FALSE;
1157   }
1158 }
1159 
1160 gboolean
gst_v4l2_allocator_import_userptr(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group,gsize img_size,int n_planes,gpointer * data,gsize * size)1161 gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
1162     GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
1163     gpointer * data, gsize * size)
1164 {
1165   GstV4l2Object *obj = allocator->obj;
1166   GstV4l2Memory *mem;
1167   gint i;
1168 
1169   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, FALSE);
1170 
1171   /* TODO Support passing N plane from 1 memory to MPLANE v4l2 format */
1172   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type) && n_planes != group->n_mem)
1173     goto n_mem_missmatch;
1174 
1175   for (i = 0; i < group->n_mem; i++) {
1176     gsize maxsize, psize;
1177 
1178     /* TODO request used size and maxsize separately */
1179     if (V4L2_TYPE_IS_MULTIPLANAR (obj->type))
1180       maxsize = psize = size[i];
1181     else
1182       maxsize = psize = img_size;
1183 
1184     g_assert (psize <= img_size);
1185 
1186     GST_LOG_OBJECT (allocator, "[%i] imported USERPTR %p plane %d size %"
1187         G_GSIZE_FORMAT, group->buffer.index, data[i], i, psize);
1188 
1189     mem = (GstV4l2Memory *) group->mem[i];
1190 
1191     mem->mem.maxsize = maxsize;
1192     mem->mem.size = psize;
1193     mem->data = data[i];
1194 
1195     group->planes[i].length = maxsize;
1196     group->planes[i].bytesused = psize;
1197     group->planes[i].m.userptr = (unsigned long) data[i];
1198     group->planes[i].data_offset = 0;
1199   }
1200 
1201   /* Copy into buffer structure if not using planes */
1202   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1203     group->buffer.bytesused = group->planes[0].bytesused;
1204     group->buffer.length = group->planes[0].length;
1205     group->buffer.m.userptr = group->planes[0].m.userptr;
1206   } else {
1207     group->buffer.length = group->n_mem;
1208   }
1209 
1210   return TRUE;
1211 
1212 n_mem_missmatch:
1213   {
1214     GST_ERROR_OBJECT (allocator, "Got %i userptr plane while driver need %i",
1215         n_planes, group->n_mem);
1216     return FALSE;
1217   }
1218 }
1219 
1220 void
gst_v4l2_allocator_flush(GstV4l2Allocator * allocator)1221 gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
1222 {
1223   gint i;
1224 
1225   GST_OBJECT_LOCK (allocator);
1226 
1227   if (!g_atomic_int_get (&allocator->active))
1228     goto done;
1229 
1230   for (i = 0; i < allocator->count; i++) {
1231     GstV4l2MemoryGroup *group = allocator->groups[i];
1232     gint n;
1233 
1234     if (IS_QUEUED (group->buffer)) {
1235       UNSET_QUEUED (group->buffer);
1236 
1237       gst_v4l2_allocator_reset_group (allocator, group);
1238 
1239       for (n = 0; n < group->n_mem; n++)
1240         gst_memory_unref (group->mem[n]);
1241     }
1242   }
1243 
1244 done:
1245   GST_OBJECT_UNLOCK (allocator);
1246 }
1247 
1248 gboolean
gst_v4l2_allocator_qbuf(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group)1249 gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
1250     GstV4l2MemoryGroup * group)
1251 {
1252   GstV4l2Object *obj = allocator->obj;
1253   gboolean ret = TRUE;
1254   gint i;
1255 
1256   g_return_val_if_fail (g_atomic_int_get (&allocator->active), FALSE);
1257 
1258   /* update sizes */
1259   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1260     for (i = 0; i < group->n_mem; i++)
1261       group->planes[i].bytesused =
1262           gst_memory_get_sizes (group->mem[i], NULL, NULL);
1263   } else {
1264     group->buffer.bytesused = gst_memory_get_sizes (group->mem[0], NULL, NULL);
1265   }
1266 
1267   /* Ensure the memory will stay around and is RO */
1268   for (i = 0; i < group->n_mem; i++)
1269     gst_memory_ref (group->mem[i]);
1270 
1271   if (obj->ioctl (obj->video_fd, VIDIOC_QBUF, &group->buffer) < 0) {
1272     GST_ERROR_OBJECT (allocator, "failed queueing buffer %i: %s",
1273         group->buffer.index, g_strerror (errno));
1274 
1275     /* Release the memory, possibly making it RW again */
1276     for (i = 0; i < group->n_mem; i++)
1277       gst_memory_unref (group->mem[i]);
1278 
1279     ret = FALSE;
1280     if (IS_QUEUED (group->buffer)) {
1281       GST_DEBUG_OBJECT (allocator,
1282           "driver pretends buffer is queued even if queue failed");
1283       UNSET_QUEUED (group->buffer);
1284     }
1285     goto done;
1286   }
1287 
1288   GST_LOG_OBJECT (allocator, "queued buffer %i (flags 0x%X)",
1289       group->buffer.index, group->buffer.flags);
1290 
1291   if (!IS_QUEUED (group->buffer)) {
1292     GST_DEBUG_OBJECT (allocator,
1293         "driver pretends buffer is not queued even if queue succeeded");
1294     SET_QUEUED (group->buffer);
1295   }
1296 
1297 done:
1298   return ret;
1299 }
1300 
/**
 * gst_v4l2_allocator_dqbuf:
 * @allocator: an active allocator
 * @group_out: (out): the dequeued memory group
 *
 * Dequeue one buffer from the device with VIDIOC_DQBUF, copy the returned
 * v4l2 state into the matching group, update the memory sizes (capture
 * direction only), and drop the per-memory references that were taken at
 * queue time.
 *
 * Returns: %GST_FLOW_OK on success, %GST_FLOW_EOS when the driver signals
 * the last buffer (EPIPE), %GST_FLOW_ERROR otherwise.
 */
GstFlowReturn
gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup ** group_out)
{
  GstV4l2Object *obj = allocator->obj;
  /* Local buffer/planes are filled by the ioctl, then copied into the group */
  struct v4l2_buffer buffer = { 0 };
  struct v4l2_plane planes[VIDEO_MAX_PLANES] = { {0} };
  gint i;

  GstV4l2MemoryGroup *group = NULL;

  g_return_val_if_fail (g_atomic_int_get (&allocator->active), GST_FLOW_ERROR);

  buffer.type = obj->type;
  buffer.memory = allocator->memory;

  /* MPLANE needs a caller-provided plane array for the ioctl to fill */
  if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
    buffer.length = obj->format.fmt.pix_mp.num_planes;
    buffer.m.planes = planes;
  }

  if (obj->ioctl (obj->video_fd, VIDIOC_DQBUF, &buffer) < 0)
    goto error;

  /* The driver tells us which buffer it returned via the index */
  group = allocator->groups[buffer.index];

  if (!IS_QUEUED (group->buffer)) {
    GST_ERROR_OBJECT (allocator,
        "buffer %i was not queued, this indicate a driver bug.", buffer.index);
    return GST_FLOW_ERROR;
  }

  group->buffer = buffer;

  GST_LOG_OBJECT (allocator, "dequeued buffer %i (flags 0x%X)", buffer.index,
      buffer.flags);

  /* Work around drivers that leave the QUEUED flag set after a dequeue */
  if (IS_QUEUED (group->buffer)) {
    GST_DEBUG_OBJECT (allocator,
        "driver pretends buffer is queued even if dequeue succeeded");
    UNSET_QUEUED (group->buffer);
  }

  if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
    /* Repoint m.planes at the group's own storage and copy the ioctl
     * results over, since the local planes array goes out of scope */
    group->buffer.m.planes = group->planes;
    memcpy (group->planes, buffer.m.planes, sizeof (planes));
  } else {
    /* Mirror the single-planar fields into plane 0 so later code can
     * treat both APIs uniformly */
    group->planes[0].bytesused = group->buffer.bytesused;
    group->planes[0].length = group->buffer.length;
    g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
    memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
  }

  /* And update memory size */
  if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
    gst_v4l2_allocator_reset_size (allocator, group);
  } else {
    /* for capture, simply read the size */
    for (i = 0; i < group->n_mem; i++) {
      gsize size, offset;

      GST_LOG_OBJECT (allocator,
          "Dequeued capture buffer, length: %u bytesused: %u data_offset: %u",
          group->planes[i].length, group->planes[i].bytesused,
          group->planes[i].data_offset);

      offset = group->planes[i].data_offset;

      /* bytesused includes data_offset; subtract it to get the payload,
       * guarding against drivers that report an inconsistent bytesused */
      if (group->planes[i].bytesused >= group->planes[i].data_offset) {
        size = group->planes[i].bytesused - group->planes[i].data_offset;
      } else {
        GST_WARNING_OBJECT (allocator, "V4L2 provided buffer has bytesused %"
            G_GUINT32_FORMAT " which is too small to include data_offset %"
            G_GUINT32_FORMAT, group->planes[i].bytesused,
            group->planes[i].data_offset);
        size = group->planes[i].bytesused;
      }

      /* Clamp to the memory's maxsize in case the driver claims more */
      if (G_LIKELY (size + offset <= group->mem[i]->maxsize))
        gst_memory_resize (group->mem[i], offset, size);
      else {
        GST_WARNING_OBJECT (allocator,
            "v4l2 provided buffer that is too big for the memory it was "
            "writing into.  v4l2 claims %" G_GSIZE_FORMAT " bytes used but "
            "memory is only %" G_GSIZE_FORMAT "B.  This is probably a driver "
            "bug.", size, group->mem[i]->maxsize);
        gst_memory_resize (group->mem[i], 0, group->mem[i]->maxsize);
      }
    }
  }

  /* Release the memory, possibly making it RW again */
  for (i = 0; i < group->n_mem; i++)
    gst_memory_unref (group->mem[i]);

  *group_out = group;
  return GST_FLOW_OK;

error:
  /* EPIPE is how V4L2 reports end-of-stream after a STOP command */
  if (errno == EPIPE) {
    GST_DEBUG_OBJECT (allocator, "broken pipe signals last buffer");
    return GST_FLOW_EOS;
  }

  GST_ERROR_OBJECT (allocator, "failed dequeuing a %s buffer: %s",
      memory_type_to_str (allocator->memory), g_strerror (errno));

  /* Log a more specific diagnosis for the common errno values */
  switch (errno) {
    case EAGAIN:
      GST_WARNING_OBJECT (allocator,
          "Non-blocking I/O has been selected using O_NONBLOCK and"
          " no buffer was in the outgoing queue.");
      break;
    case EINVAL:
      GST_ERROR_OBJECT (allocator,
          "The buffer type is not supported, or the index is out of bounds, "
          "or no buffers have been allocated yet, or the userptr "
          "or length are invalid.");
      break;
    case ENOMEM:
      GST_ERROR_OBJECT (allocator,
          "insufficient memory to enqueue a user pointer buffer");
      break;
    case EIO:
      GST_INFO_OBJECT (allocator,
          "VIDIOC_DQBUF failed due to an internal error."
          " Can also indicate temporary problems like signal loss."
          " Note the driver might dequeue an (empty) buffer despite"
          " returning an error, or even stop capturing.");
      /* have we de-queued a buffer ? */
      if (!IS_QUEUED (buffer)) {
        GST_DEBUG_OBJECT (allocator, "reenqueueing buffer");
        /* FIXME ... should we do something here? */
      }
      break;
    case EINTR:
      GST_WARNING_OBJECT (allocator, "could not sync on a buffer on device");
      break;
    default:
      GST_WARNING_OBJECT (allocator,
          "Grabbing frame got interrupted unexpectedly. %d: %s.", errno,
          g_strerror (errno));
      break;
  }

  return GST_FLOW_ERROR;
}
1448 
1449 void
gst_v4l2_allocator_reset_group(GstV4l2Allocator * allocator,GstV4l2MemoryGroup * group)1450 gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
1451     GstV4l2MemoryGroup * group)
1452 {
1453   switch (allocator->memory) {
1454     case V4L2_MEMORY_USERPTR:
1455       gst_v4l2_allocator_clear_userptr (allocator, group);
1456       break;
1457     case V4L2_MEMORY_DMABUF:
1458       gst_v4l2_allocator_clear_dmabufin (allocator, group);
1459       break;
1460     case V4L2_MEMORY_MMAP:
1461       break;
1462     default:
1463       g_assert_not_reached ();
1464       break;
1465   }
1466 
1467   gst_v4l2_allocator_reset_size (allocator, group);
1468 }
1469