1 /* GStreamer
2 * Copyright (C) 2006 Edward Hervey <edward@fluendo.com>
3 * Copyright (C) 2007 Jan Schmidt <jan@fluendo.com>
4 * Copyright (C) 2007 Wim Taymans <wim@fluendo.com>
5 * Copyright (C) 2011 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 *
7 * gstmultiqueue.c:
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
18 *
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
23 */
24
25 /**
26 * SECTION:element-multiqueue
27 * @title: multiqueue
28 * @see_also: #GstQueue
29 *
30 * Multiqueue is similar to a normal #GstQueue with the following additional
31 * features:
32 *
33 * 1) Multiple streamhandling
34 *
35 * * The element handles queueing data on more than one stream at once. To
36 * achieve such a feature it has request sink pads (sink%u) and
37 * 'sometimes' src pads (src%u). When requesting a given sinkpad with gst_element_request_pad(),
38 * the associated srcpad for that stream will be created.
39 * Example: requesting sink1 will generate src1.
40 *
 * 2) Non-starvation on multiple streams
42 *
43 * * If more than one stream is used with the element, the streams' queues
44 * will be dynamically grown (up to a limit), in order to ensure that no
45 * stream is risking data starvation. This guarantees that at any given
46 * time there are at least N bytes queued and available for each individual
47 * stream. If an EOS event comes through a srcpad, the associated queue will be
48 * considered as 'not-empty' in the queue-size-growing algorithm.
49 *
50 * 3) Non-linked srcpads graceful handling
51 *
52 * * In order to better support dynamic switching between streams, the multiqueue
53 * (unlike the current GStreamer queue) continues to push buffers on non-linked
54 * pads rather than shutting down. In addition, to prevent a non-linked stream from very quickly consuming all
55 * available buffers and thus 'racing ahead' of the other streams, the element
56 * must ensure that buffers and inlined events for a non-linked stream are pushed
57 * in the same order as they were received, relative to the other streams
58 * controlled by the element. This means that a buffer cannot be pushed to a
59 * non-linked pad any sooner than buffers in any other stream which were received
60 * before it.
61 *
62 * Data is queued until one of the limits specified by the
63 * #GstMultiQueue:max-size-buffers, #GstMultiQueue:max-size-bytes and/or
64 * #GstMultiQueue:max-size-time properties has been reached. Any attempt to push
65 * more buffers into the queue will block the pushing thread until more space
 * becomes available. #GstMultiQueue:extra-size-buffers,
 * #GstMultiQueue:extra-size-bytes and #GstMultiQueue:extra-size-time are
 * currently unused.
71 *
 * The default queue size limits are 5 buffers, 10MB of data, or
 * two seconds' worth of data, whichever is reached first. Note that the
 * number of buffers will dynamically grow depending on the fill level of
 * other queues.
76 *
77 * The #GstMultiQueue::underrun signal is emitted when all of the queues
78 * are empty. The #GstMultiQueue::overrun signal is emitted when one of the
79 * queues is filled.
80 * Both signals are emitted from the context of the streaming thread.
81 *
82 * When using #GstMultiQueue:sync-by-running-time the unlinked streams will
83 * be throttled by the highest running-time of linked streams. This allows
84 * further relinking of those unlinked streams without them being in the
85 * future (i.e. to achieve gapless playback).
86 * When dealing with streams which have got different consumption requirements
87 * downstream (ex: video decoders which will consume more buffer (in time) than
88 * audio decoders), it is recommended to group streams of the same type
89 * by using the pad "group-id" property. This will further throttle streams
90 * in time within that group.
91 */
92
93 #ifdef HAVE_CONFIG_H
94 # include "config.h"
95 #endif
96
97 #include <gst/gst.h>
98 #include <gst/glib-compat-private.h>
99 #include <stdio.h>
100
101 #include "gstmultiqueue.h"
102 #include "gstcoreelementselements.h"
103
/* GstSingleQueue:
 * @sinkpad: associated sink #GstPad
 * @srcpad: associated source #GstPad
 *
 * Structure containing all information and properties about
 * a single queue.
 */
typedef struct _GstSingleQueue GstSingleQueue;

struct _GstSingleQueue
{
  /* reference count, managed with gst_single_queue_ref()/unref() */
  gint refcount;

  /* unique identifier of the queue */
  guint id;
  /* group of streams to which this queue belongs to */
  guint groupid;
  /* NOTE(review): appears to cache the high running-time of this queue's
   * group (see compute_high_time()) — confirm against the update sites */
  GstClockTimeDiff group_high_time;

  /* weak references back to the owning multiqueue and to our pads,
   * to avoid reference cycles (the pads own strong refs on us) */
  GWeakRef mqueue;
  GWeakRef sinkpad;
  GWeakRef srcpad;

  /* flowreturn of previous srcpad push */
  GstFlowReturn srcresult;
  /* If something was actually pushed on
   * this pad after flushing/pad activation
   * and the srcresult corresponds to something
   * real
   */
  gboolean pushed;

  /* segments */
  GstSegment sink_segment;
  GstSegment src_segment;
  gboolean has_src_segment;     /* preferred over initializing the src_segment to
                                 * UNDEFINED as this doesn't requires adding ifs
                                 * in every segment usage */

  /* position of src/sink */
  GstClockTimeDiff sinktime, srctime;
  /* cached input value, used for interleave */
  GstClockTimeDiff cached_sinktime;
  /* TRUE if either position needs to be recalculated */
  gboolean sink_tainted, src_tainted;

  /* queue of data */
  GstDataQueue *queue;
  GstDataQueueSize max_size, extra_size;
  GstClockTime cur_time;        /* current level of the queue in time */
  gboolean is_eos;
  gboolean is_segment_done;
  gboolean is_sparse;
  gboolean flushing;
  gboolean active;

  /* Protected by global lock */
  guint32 nextid;               /* ID of the next object waiting to be pushed */
  guint32 oldid;                /* ID of the last object pushed (last in a series) */
  guint32 last_oldid;           /* Previously observed old_id, reset to MAXUINT32 on flush */
  GstClockTimeDiff next_time;   /* End running time of next buffer to be pushed */
  GstClockTimeDiff last_time;   /* Start running time of last pushed buffer */
  GCond turn;                   /* SingleQueue turn waiting conditional */

  /* for serialized queries */
  GCond query_handled;
  gboolean last_query;
  GstQuery *last_handled_query;

  /* For interleave calculation */
  GThread *thread;              /* Streaming thread of SingleQueue */
  GstClockTime interleave;      /* Calculated interleve within the thread */
};
177
/* Extension of GstDataQueueItem structure for our usage */
typedef struct _GstMultiQueueItem GstMultiQueueItem;

struct _GstMultiQueueItem
{
  /* first four fields mirror GstDataQueueItem */
  GstMiniObject *object;        /* queued buffer, event or query */
  guint size;                   /* size accounted for this item */
  guint64 duration;             /* duration accounted for this item */
  gboolean visible;             /* whether it counts towards the buffer level */

  GDestroyNotify destroy;       /* how to dispose of this item */
  /* global position id, presumably used to preserve the relative order
   * of items across the single queues (cf. nextid/oldid) */
  guint32 posid;

  gboolean is_query;            /* TRUE if @object is a GstQuery */
};
193
194 static GstSingleQueue *gst_single_queue_new (GstMultiQueue * mqueue, guint id);
195 static void gst_single_queue_unref (GstSingleQueue * squeue);
196 static GstSingleQueue *gst_single_queue_ref (GstSingleQueue * squeue);
197
198 static void wake_up_next_non_linked (GstMultiQueue * mq);
199 static void compute_high_id (GstMultiQueue * mq);
200 static void compute_high_time (GstMultiQueue * mq, guint groupid);
201 static void single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
202 static void single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq);
203
204 static void update_buffering (GstMultiQueue * mq, GstSingleQueue * sq);
205 static void gst_multi_queue_post_buffering (GstMultiQueue * mq);
206 static void recheck_buffering_status (GstMultiQueue * mq);
207
208 static void gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full);
209
210 static void calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq);
211
212 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink_%u",
213 GST_PAD_SINK,
214 GST_PAD_REQUEST,
215 GST_STATIC_CAPS_ANY);
216
217 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src_%u",
218 GST_PAD_SRC,
219 GST_PAD_SOMETIMES,
220 GST_STATIC_CAPS_ANY);
221
222 GST_DEBUG_CATEGORY_STATIC (multi_queue_debug);
223 #define GST_CAT_DEFAULT (multi_queue_debug)
224
225 /* Signals and args */
226 enum
227 {
228 SIGNAL_UNDERRUN,
229 SIGNAL_OVERRUN,
230 LAST_SIGNAL
231 };
232
/* default limits, we try to keep up to 2 seconds of data and if there is not
 * time, up to 10 MB. The number of buffers is dynamically scaled to make sure
 * there is data in the queues. Normally, the byte and time limits are not hit
 * in these conditions. */
/* Expression macros are parenthesized so they expand safely inside any
 * surrounding expression (e.g. arithmetic or comparison contexts). */
#define DEFAULT_MAX_SIZE_BYTES (10 * 1024 * 1024)       /* 10 MB */
#define DEFAULT_MAX_SIZE_BUFFERS 5
#define DEFAULT_MAX_SIZE_TIME (2 * GST_SECOND)

#ifdef OHOS_EXT_FUNC
// ohos.ext.func.0012
#define DEFAULT_INTERNAL_BUFFERING_TIME (500 * GST_MSECOND)
#endif
/* second limits. When we hit one of the above limits we are probably dealing
 * with a badly muxed file and we scale the limits to these emergency values.
 * This is currently not yet implemented.
 * Since we dynamically scale the queue buffer size up to the limits but avoid
 * going above the max-size-buffers when we can, we don't really need this
 * additional extra size. */
#define DEFAULT_EXTRA_SIZE_BYTES (10 * 1024 * 1024)     /* 10 MB */
#define DEFAULT_EXTRA_SIZE_BUFFERS 5
#define DEFAULT_EXTRA_SIZE_TIME (3 * GST_SECOND)

#define DEFAULT_USE_BUFFERING FALSE
#define DEFAULT_LOW_WATERMARK 0.01
#define DEFAULT_HIGH_WATERMARK 0.99
#define DEFAULT_SYNC_BY_RUNNING_TIME FALSE
#define DEFAULT_USE_INTERLEAVE FALSE
#define DEFAULT_UNLINKED_CACHE_TIME (250 * GST_MSECOND)

#define DEFAULT_MINIMUM_INTERLEAVE (250 * GST_MSECOND)
263
264 enum
265 {
266 PROP_0,
267 PROP_EXTRA_SIZE_BYTES,
268 PROP_EXTRA_SIZE_BUFFERS,
269 PROP_EXTRA_SIZE_TIME,
270 PROP_MAX_SIZE_BYTES,
271 PROP_MAX_SIZE_BUFFERS,
272 PROP_MAX_SIZE_TIME,
273 PROP_USE_BUFFERING,
274 PROP_LOW_PERCENT,
275 PROP_HIGH_PERCENT,
276 PROP_LOW_WATERMARK,
277 PROP_HIGH_WATERMARK,
278 PROP_SYNC_BY_RUNNING_TIME,
279 PROP_USE_INTERLEAVE,
280 PROP_UNLINKED_CACHE_TIME,
281 PROP_MINIMUM_INTERLEAVE,
282 PROP_STATS,
283 #ifdef OHOS_EXT_FUNC
284 // ohos.ext.func.0013
285 PROP_MQ_NUM_ID,
286 #endif
287 PROP_LAST
288 };
289
290 /* Explanation for buffer levels and percentages:
291 *
292 * The buffering_level functions here return a value in a normalized range
293 * that specifies the current fill level of a queue. The range goes from 0 to
294 * MAX_BUFFERING_LEVEL. The low/high watermarks also use this same range.
295 *
296 * This is not to be confused with the buffering_percent value, which is
297 * a *relative* quantity - relative to the low/high watermarks.
298 * buffering_percent = 0% means overall buffering_level is at the low watermark.
299 * buffering_percent = 100% means overall buffering_level is at the high watermark.
300 * buffering_percent is used for determining if the fill level has reached
301 * the high watermark, and for producing BUFFERING messages. This value
302 * always uses a 0..100 range (since it is a percentage).
303 *
304 * To avoid future confusions, whenever "buffering level" is mentioned, it
305 * refers to the absolute level which is in the 0..MAX_BUFFERING_LEVEL
306 * range. Whenever "buffering_percent" is mentioned, it refers to the
307 * percentage value that is relative to the low/high watermark. */
308
309 /* Using a buffering level range of 0..1000000 to allow for a
310 * resolution in ppm (1 ppm = 0.0001%) */
311 #define MAX_BUFFERING_LEVEL 1000000
312
313 /* How much 1% makes up in the buffer level range */
314 #define BUF_LEVEL_PERCENT_FACTOR ((MAX_BUFFERING_LEVEL) / 100)
315
316 /* GstMultiQueuePad */
317
318 #define DEFAULT_PAD_GROUP_ID 0
319
320 enum
321 {
322 PROP_PAD_0,
323 PROP_PAD_GROUP_ID,
324 PROP_CURRENT_LEVEL_BUFFERS,
325 PROP_CURRENT_LEVEL_BYTES,
326 PROP_CURRENT_LEVEL_TIME,
327 };
328
329 #define GST_TYPE_MULTIQUEUE_PAD (gst_multiqueue_pad_get_type())
330 #define GST_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePad))
331 #define GST_IS_MULTIQUEUE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MULTIQUEUE_PAD))
332 #define GST_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
333 #define GST_IS_MULTIQUEUE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_MULTIQUEUE_PAD))
334 #define GST_MULTIQUEUE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_MULTIQUEUE_PAD,GstMultiQueuePadClass))
335
/* Take/release the global multiqueue lock protecting the queue list and
 * the shared state of the single queues */
#define GST_MULTI_QUEUE_MUTEX_LOCK(q) G_STMT_START { \
  g_mutex_lock (&q->qlock); \
} G_STMT_END

#define GST_MULTI_QUEUE_MUTEX_UNLOCK(q) G_STMT_START { \
  g_mutex_unlock (&q->qlock); \
} G_STMT_END

/* Update the cached buffering percent and flag that a BUFFERING message
 * should be posted later (see gst_multi_queue_post_buffering()) */
#define SET_PERCENT(mq, perc) G_STMT_START { \
  if (perc != mq->buffering_percent) { \
    mq->buffering_percent = perc; \
    mq->buffering_percent_changed = TRUE; \
    GST_DEBUG_OBJECT (mq, "buffering %d percent", perc); \
  } \
} G_STMT_END
351
/* GstPad subclass used for the multiqueue's request sink pads and
 * sometimes src pads; carries a reference to its single queue */
struct _GstMultiQueuePad
{
  GstPad parent;

  /* owned reference, released in gst_multiqueue_pad_finalize() */
  GstSingleQueue *sq;
};

struct _GstMultiQueuePadClass
{
  GstPadClass parent_class;
};

GType gst_multiqueue_pad_get_type (void);

G_DEFINE_TYPE (GstMultiQueuePad, gst_multiqueue_pad, GST_TYPE_PAD);
367
368 static guint
gst_multiqueue_pad_get_group_id(GstMultiQueuePad * pad)369 gst_multiqueue_pad_get_group_id (GstMultiQueuePad * pad)
370 {
371 guint ret = 0;
372 GstMultiQueue *mq;
373
374 if (!pad->sq)
375 return 0;
376
377 mq = g_weak_ref_get (&pad->sq->mqueue);
378
379 if (mq) {
380 GST_OBJECT_LOCK (mq);
381 }
382
383 ret = pad->sq->groupid;
384
385 if (mq) {
386 GST_OBJECT_UNLOCK (mq);
387 gst_object_unref (mq);
388 }
389
390 return ret;
391 }
392
393 static guint
gst_multiqueue_pad_get_current_level_buffers(GstMultiQueuePad * pad)394 gst_multiqueue_pad_get_current_level_buffers (GstMultiQueuePad * pad)
395 {
396 GstSingleQueue *sq = pad->sq;
397 GstDataQueueSize level;
398 GstMultiQueue *mq;
399
400 if (!sq)
401 return 0;
402
403 mq = g_weak_ref_get (&pad->sq->mqueue);
404
405 if (mq) {
406 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
407 }
408
409 gst_data_queue_get_level (sq->queue, &level);
410
411 if (mq) {
412 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
413 gst_object_unref (mq);
414 }
415
416 return level.visible;
417 }
418
419 static guint
gst_multiqueue_pad_get_current_level_bytes(GstMultiQueuePad * pad)420 gst_multiqueue_pad_get_current_level_bytes (GstMultiQueuePad * pad)
421 {
422 GstSingleQueue *sq = pad->sq;
423 GstDataQueueSize level;
424 GstMultiQueue *mq;
425
426 if (!sq)
427 return 0;
428
429 mq = g_weak_ref_get (&pad->sq->mqueue);
430
431 if (mq) {
432 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
433 }
434
435 gst_data_queue_get_level (sq->queue, &level);
436
437 if (mq) {
438 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
439 gst_object_unref (mq);
440 }
441
442 return level.bytes;
443 }
444
445 static guint64
gst_multiqueue_pad_get_current_level_time(GstMultiQueuePad * pad)446 gst_multiqueue_pad_get_current_level_time (GstMultiQueuePad * pad)
447 {
448 GstSingleQueue *sq = pad->sq;
449 GstMultiQueue *mq;
450 guint64 ret;
451
452 if (!sq)
453 return 0;
454
455 mq = g_weak_ref_get (&pad->sq->mqueue);
456
457 if (mq) {
458 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
459 }
460
461 ret = sq->cur_time;
462
463 if (mq) {
464 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
465 gst_object_unref (mq);
466 }
467
468 return ret;
469 }
470
471 static void
gst_multiqueue_pad_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)472 gst_multiqueue_pad_get_property (GObject * object, guint prop_id,
473 GValue * value, GParamSpec * pspec)
474 {
475 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
476
477 switch (prop_id) {
478 case PROP_PAD_GROUP_ID:
479 g_value_set_uint (value, gst_multiqueue_pad_get_group_id (pad));
480 break;
481 case PROP_CURRENT_LEVEL_BUFFERS:{
482 g_value_set_uint (value,
483 gst_multiqueue_pad_get_current_level_buffers (pad));
484 break;
485 }
486 case PROP_CURRENT_LEVEL_BYTES:{
487 g_value_set_uint (value,
488 gst_multiqueue_pad_get_current_level_bytes (pad));
489 break;
490 }
491 case PROP_CURRENT_LEVEL_TIME:{
492 g_value_set_uint64 (value,
493 gst_multiqueue_pad_get_current_level_time (pad));
494 break;
495 }
496 default:
497 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
498 break;
499 }
500 }
501
/* GObject property setter for GstMultiQueuePad; only group-id is
 * writable */
static void
gst_multiqueue_pad_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);

  switch (prop_id) {
    case PROP_PAD_GROUP_ID:
      if (pad->sq) {
        /* take a strong ref on the owning multiqueue (if still alive)
         * so its object lock can protect the groupid update */
        GstMultiQueue *mqueue = g_weak_ref_get (&pad->sq->mqueue);

        if (mqueue)
          GST_OBJECT_LOCK (mqueue);

        pad->sq->groupid = g_value_get_uint (value);

        if (mqueue) {
          GST_OBJECT_UNLOCK (mqueue);
          gst_object_unref (mqueue);
        }
      }
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
529
530 static void
gst_multiqueue_pad_finalize(GObject * object)531 gst_multiqueue_pad_finalize (GObject * object)
532 {
533 GstMultiQueuePad *pad = GST_MULTIQUEUE_PAD (object);
534
535 if (pad->sq)
536 gst_single_queue_unref (pad->sq);
537
538 G_OBJECT_CLASS (gst_multiqueue_pad_parent_class)->finalize (object);
539 }
540
/* Class initializer for GstMultiQueuePad: installs vfuncs and the
 * group-id / current-level-* properties */
static void
gst_multiqueue_pad_class_init (GstMultiQueuePadClass * klass)
{
  GObjectClass *gobject_class = (GObjectClass *) klass;

  gobject_class->set_property = gst_multiqueue_pad_set_property;
  gobject_class->get_property = gst_multiqueue_pad_get_property;
  gobject_class->finalize = gst_multiqueue_pad_finalize;

  /**
   * GstMultiQueuePad:group-id:
   *
   * Group to which this pad belongs.
   *
   * Since: 1.10
   */
  g_object_class_install_property (gobject_class, PROP_PAD_GROUP_ID,
      g_param_spec_uint ("group-id", "Group ID",
          "Group to which this pad belongs", 0, G_MAXUINT32,
          DEFAULT_PAD_GROUP_ID, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueuePad:current-level-buffers:
   *
   * The corresponding queue's current level of buffers.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BUFFERS,
      g_param_spec_uint ("current-level-buffers", "Current level buffers",
          "Current level buffers", 0, G_MAXUINT32,
          0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueuePad:current-level-bytes:
   *
   * The corresponding queue's current level of bytes.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_BYTES,
      g_param_spec_uint ("current-level-bytes", "Current level bytes",
          "Current level bytes", 0, G_MAXUINT32,
          0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueuePad:current-level-time:
   *
   * The corresponding queue's current level of time.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_CURRENT_LEVEL_TIME,
      g_param_spec_uint64 ("current-level-time", "Current level time",
          "Current level time", 0, G_MAXUINT64,
          0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
}
598
static void
gst_multiqueue_pad_init (GstMultiQueuePad * pad)
{
  /* Nothing to initialize here: pad->sq is assigned by the owning
   * multiqueue when the request pad is created */
}
604
605
606 /* Convenience function */
607 static inline GstClockTimeDiff
my_segment_to_running_time(GstSegment * segment,GstClockTime val)608 my_segment_to_running_time (GstSegment * segment, GstClockTime val)
609 {
610 GstClockTimeDiff res = GST_CLOCK_STIME_NONE;
611
612 if (GST_CLOCK_TIME_IS_VALID (val)) {
613 gboolean sign =
614 gst_segment_to_running_time_full (segment, GST_FORMAT_TIME, val, &val);
615 if (sign > 0)
616 res = val;
617 else if (sign < 0)
618 res = -val;
619 }
620 return res;
621 }
622
623 static void gst_multi_queue_finalize (GObject * object);
624 static void gst_multi_queue_set_property (GObject * object,
625 guint prop_id, const GValue * value, GParamSpec * pspec);
626 static void gst_multi_queue_get_property (GObject * object,
627 guint prop_id, GValue * value, GParamSpec * pspec);
628
629 static GstPad *gst_multi_queue_request_new_pad (GstElement * element,
630 GstPadTemplate * temp, const gchar * name, const GstCaps * caps);
631 static void gst_multi_queue_release_pad (GstElement * element, GstPad * pad);
632 static GstStateChangeReturn gst_multi_queue_change_state (GstElement *
633 element, GstStateChange transition);
634
635 static void gst_multi_queue_loop (GstPad * pad);
636
637 #define _do_init \
638 GST_DEBUG_CATEGORY_INIT (multi_queue_debug, "multiqueue", 0, "multiqueue element");
639 #define gst_multi_queue_parent_class parent_class
640 G_DEFINE_TYPE_WITH_CODE (GstMultiQueue, gst_multi_queue, GST_TYPE_ELEMENT,
641 _do_init);
642 GST_ELEMENT_REGISTER_DEFINE (multiqueue, "multiqueue", GST_RANK_NONE,
643 GST_TYPE_MULTI_QUEUE);
644
645 static guint gst_multi_queue_signals[LAST_SIGNAL] = { 0 };
646
/* Class initializer for GstMultiQueue: installs signals, properties,
 * pad templates and element vfuncs */
static void
gst_multi_queue_class_init (GstMultiQueueClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);

  gobject_class->set_property = gst_multi_queue_set_property;
  gobject_class->get_property = gst_multi_queue_get_property;

  /* SIGNALS */

  /**
   * GstMultiQueue::underrun:
   * @multiqueue: the multiqueue instance
   *
   * This signal is emitted from the streaming thread when there is
   * no data in any of the queues inside the multiqueue instance (underrun).
   *
   * This indicates either starvation or EOS from the upstream data sources.
   */
  gst_multi_queue_signals[SIGNAL_UNDERRUN] =
      g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
      G_STRUCT_OFFSET (GstMultiQueueClass, underrun), NULL, NULL,
      NULL, G_TYPE_NONE, 0);

  /**
   * GstMultiQueue::overrun:
   * @multiqueue: the multiqueue instance
   *
   * Reports that one of the queues in the multiqueue is full (overrun).
   * A queue is full if the total amount of data inside it (num-buffers, time,
   * size) is higher than the boundary values which can be set through the
   * GObject properties.
   *
   * This can be used as an indicator of pre-roll.
   */
  gst_multi_queue_signals[SIGNAL_OVERRUN] =
      g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
      G_STRUCT_OFFSET (GstMultiQueueClass, overrun), NULL, NULL,
      NULL, G_TYPE_NONE, 0);

  /* PROPERTIES */

  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BYTES,
      g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
          "Max. amount of data in the queue (bytes, 0=disable)",
          0, G_MAXUINT, DEFAULT_MAX_SIZE_BYTES,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_BUFFERS,
      g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
          "Max. number of buffers in the queue (0=disable)", 0, G_MAXUINT,
          DEFAULT_MAX_SIZE_BUFFERS,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_MAX_SIZE_TIME,
      g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
          "Max. amount of data in the queue (in ns, 0=disable)", 0, G_MAXUINT64,
          DEFAULT_MAX_SIZE_TIME, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  /* extra-size-* properties are documented as NOT IMPLEMENTED */
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BYTES,
      g_param_spec_uint ("extra-size-bytes", "Extra Size (kB)",
          "Amount of data the queues can grow if one of them is empty (bytes, 0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BYTES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_BUFFERS,
      g_param_spec_uint ("extra-size-buffers", "Extra Size (buffers)",
          "Amount of buffers the queues can grow if one of them is empty (0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT, DEFAULT_EXTRA_SIZE_BUFFERS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_EXTRA_SIZE_TIME,
      g_param_spec_uint64 ("extra-size-time", "Extra Size (ns)",
          "Amount of time the queues can grow if one of them is empty (in ns, 0=disable)"
          " (NOT IMPLEMENTED)",
          0, G_MAXUINT64, DEFAULT_EXTRA_SIZE_TIME,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueue:use-buffering:
   *
   * Enable the buffering option in multiqueue so that BUFFERING messages are
   * emitted based on low-/high-percent thresholds.
   */
  g_object_class_install_property (gobject_class, PROP_USE_BUFFERING,
      g_param_spec_boolean ("use-buffering", "Use buffering",
          "Emit GST_MESSAGE_BUFFERING based on low-/high-percent thresholds "
          "(0% = low-watermark, 100% = high-watermark)",
          DEFAULT_USE_BUFFERING, G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:low-percent:
   *
   * Low threshold percent for buffering to start.
   */
  g_object_class_install_property (gobject_class, PROP_LOW_PERCENT,
      g_param_spec_int ("low-percent", "Low percent",
          "Low threshold for buffering to start. Only used if use-buffering is True "
          "(Deprecated: use low-watermark instead)",
          0, 100, DEFAULT_LOW_WATERMARK * 100,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:high-percent:
   *
   * High threshold percent for buffering to finish.
   */
  g_object_class_install_property (gobject_class, PROP_HIGH_PERCENT,
      g_param_spec_int ("high-percent", "High percent",
          "High threshold for buffering to finish. Only used if use-buffering is True "
          "(Deprecated: use high-watermark instead)",
          0, 100, DEFAULT_HIGH_WATERMARK * 100,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:low-watermark:
   *
   * Low threshold watermark for buffering to start.
   *
   * Since: 1.10
   */
  g_object_class_install_property (gobject_class, PROP_LOW_WATERMARK,
      g_param_spec_double ("low-watermark", "Low watermark",
          "Low threshold for buffering to start. Only used if use-buffering is True",
          0.0, 1.0, DEFAULT_LOW_WATERMARK,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /**
   * GstMultiQueue:high-watermark:
   *
   * High threshold watermark for buffering to finish.
   *
   * Since: 1.10
   */
  g_object_class_install_property (gobject_class, PROP_HIGH_WATERMARK,
      g_param_spec_double ("high-watermark", "High watermark",
          "High threshold for buffering to finish. Only used if use-buffering is True",
          0.0, 1.0, DEFAULT_HIGH_WATERMARK,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueue:sync-by-running-time:
   *
   * If enabled multiqueue will synchronize deactivated or not-linked streams
   * to the activated and linked streams by taking the running time.
   * Otherwise multiqueue will synchronize the deactivated or not-linked
   * streams by keeping the order in which buffers and events arrived compared
   * to active and linked streams.
   */
  g_object_class_install_property (gobject_class, PROP_SYNC_BY_RUNNING_TIME,
      g_param_spec_boolean ("sync-by-running-time", "Sync By Running Time",
          "Synchronize deactivated or not-linked streams by running time",
          DEFAULT_SYNC_BY_RUNNING_TIME,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_USE_INTERLEAVE,
      g_param_spec_boolean ("use-interleave", "Use interleave",
          "Adjust time limits based on input interleave",
          DEFAULT_USE_INTERLEAVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_UNLINKED_CACHE_TIME,
      g_param_spec_uint64 ("unlinked-cache-time", "Unlinked cache time (ns)",
          "Extra buffering in time for unlinked streams (if 'sync-by-running-time')",
          0, G_MAXUINT64, DEFAULT_UNLINKED_CACHE_TIME,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MINIMUM_INTERLEAVE,
      g_param_spec_uint64 ("min-interleave-time", "Minimum interleave time",
          "Minimum extra buffering for deinterleaving (size of the queues) when use-interleave=true",
          0, G_MAXUINT64, DEFAULT_MINIMUM_INTERLEAVE,
          G_PARAM_READWRITE | GST_PARAM_MUTABLE_PLAYING |
          G_PARAM_STATIC_STRINGS));

  /**
   * GstMultiQueue:stats:
   *
   * Various #GstMultiQueue statistics. This property returns a #GstStructure
   * with name "application/x-gst-multi-queue-stats" with the following fields:
   *
   * - "queues" GST_TYPE_ARRAY Contains one GstStructure named "queue_%d"
   *   (where \%d is the queue's ID) per internal queue:
   *   - "buffers" G_TYPE_UINT The queue's current level of buffers
   *   - "bytes" G_TYPE_UINT The queue's current level of bytes
   *   - "time" G_TYPE_UINT64 The queue's current level of time
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_STATS,
      g_param_spec_boxed ("stats", "Stats",
          "Multiqueue Statistics",
          GST_TYPE_STRUCTURE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

#ifdef OHOS_EXT_FUNC
  // ohos.ext.func.0013
  /**
   * GstMultiQueue:mq-num-id:
   *
   * Numeric identifier of this multiqueue instance (OpenHarmony
   * extension).
   */
  g_object_class_install_property (gobject_class, PROP_MQ_NUM_ID,
      g_param_spec_uint ("mq-num-id", "Mq num id", "Multiqueue number id",
          0, 100, 0,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#endif
  gobject_class->finalize = gst_multi_queue_finalize;

  gst_element_class_set_static_metadata (gstelement_class,
      "MultiQueue",
      "Generic", "Multiple data queue", "Edward Hervey <edward@fluendo.com>");
  gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
      &sinktemplate, GST_TYPE_MULTIQUEUE_PAD);
  gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
      &srctemplate, GST_TYPE_MULTIQUEUE_PAD);

  gstelement_class->request_new_pad =
      GST_DEBUG_FUNCPTR (gst_multi_queue_request_new_pad);
  gstelement_class->release_pad =
      GST_DEBUG_FUNCPTR (gst_multi_queue_release_pad);
  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_multi_queue_change_state);

  gst_type_mark_as_plugin_api (GST_TYPE_MULTIQUEUE_PAD, 0);
}
870
/* Instance initializer: set all limits and bookkeeping fields to their
 * defaults and create the locks */
static void
gst_multi_queue_init (GstMultiQueue * mqueue)
{
  mqueue->nbqueues = 0;
  mqueue->queues = NULL;

  mqueue->max_size.bytes = DEFAULT_MAX_SIZE_BYTES;
  mqueue->max_size.visible = DEFAULT_MAX_SIZE_BUFFERS;
  mqueue->max_size.time = DEFAULT_MAX_SIZE_TIME;

  mqueue->extra_size.bytes = DEFAULT_EXTRA_SIZE_BYTES;
  mqueue->extra_size.visible = DEFAULT_EXTRA_SIZE_BUFFERS;
  mqueue->extra_size.time = DEFAULT_EXTRA_SIZE_TIME;

  /* watermarks are stored in the 0..MAX_BUFFERING_LEVEL range (ppm),
   * not as the 0.0..1.0 property values */
  mqueue->use_buffering = DEFAULT_USE_BUFFERING;
  mqueue->low_watermark = DEFAULT_LOW_WATERMARK * MAX_BUFFERING_LEVEL;
  mqueue->high_watermark = DEFAULT_HIGH_WATERMARK * MAX_BUFFERING_LEVEL;

  mqueue->sync_by_running_time = DEFAULT_SYNC_BY_RUNNING_TIME;
  mqueue->use_interleave = DEFAULT_USE_INTERLEAVE;
  mqueue->min_interleave_time = DEFAULT_MINIMUM_INTERLEAVE;
  mqueue->unlinked_cache_time = DEFAULT_UNLINKED_CACHE_TIME;

  mqueue->counter = 1;
  mqueue->highid = -1;
  mqueue->high_time = GST_CLOCK_STIME_NONE;

#ifdef OHOS_EXT_FUNC
  // ohos.ext.func.0012
  mqueue->buffering_time = 0;
  mqueue->buffering_time_changed = FALSE;

  mqueue->mq_num_id = 0;
#endif

  g_mutex_init (&mqueue->qlock);
  g_mutex_init (&mqueue->buffering_post_lock);
}
909
/* GObject finalize: release every remaining single queue and the instance
 * locks, then chain up to the parent class. */
static void
gst_multi_queue_finalize (GObject * object)
{
  GstMultiQueue *mqueue = GST_MULTI_QUEUE (object);

  /* Drop the queue list and the reference each entry holds */
  g_list_free_full (mqueue->queues, (GDestroyNotify) gst_single_queue_unref);
  mqueue->queues = NULL;
  /* Invalidate any outstanding iterator over the (now gone) list */
  mqueue->queues_cookie++;

  /* free/unref instance data */
  g_mutex_clear (&mqueue->qlock);
  g_mutex_clear (&mqueue->buffering_post_lock);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
925
/* Propagate the multiqueue-level limit @format (bytes / visible / time) to
 * every single queue, refreshing its buffering state and waking up the data
 * queue so the new limit takes effect. Must be called with the multiqueue
 * lock held. */
#define SET_CHILD_PROPERTY(mq,format) G_STMT_START { \
    GList * tmp = mq->queues; \
    while (tmp) { \
      GstSingleQueue *q = (GstSingleQueue*)tmp->data; \
      q->max_size.format = mq->max_size.format; \
      update_buffering (mq, q); \
      gst_data_queue_limits_changed (q->queue); \
      tmp = g_list_next(tmp); \
    }; \
} G_STMT_END
936
/* GObject set_property implementation.
 *
 * Limit-type properties are applied under the multiqueue lock, propagated to
 * every existing single queue, and followed by a buffering re-post outside
 * the lock; simple flags are written directly. */
static void
gst_multi_queue_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstMultiQueue *mq = GST_MULTI_QUEUE (object);

  switch (prop_id) {
    case PROP_MAX_SIZE_BYTES:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->max_size.bytes = g_value_get_uint (value);
      SET_CHILD_PROPERTY (mq, bytes);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);
      break;
    case PROP_MAX_SIZE_BUFFERS:
    {
      GList *tmp;
      gint new_size = g_value_get_uint (value);

      GST_MULTI_QUEUE_MUTEX_LOCK (mq);

      mq->max_size.visible = new_size;

      /* Unlike bytes/time this cannot use SET_CHILD_PROPERTY: the visible
       * limit of a single queue may have been grown dynamically and must not
       * be shrunk below its current fill level. */
      tmp = mq->queues;
      while (tmp) {
        GstDataQueueSize size;
        GstSingleQueue *q = (GstSingleQueue *) tmp->data;
        gst_data_queue_get_level (q->queue, &size);

        GST_DEBUG_OBJECT (mq, "Queue %d: Requested buffers size: %d,"
            " current: %d, current max %d", q->id, new_size, size.visible,
            q->max_size.visible);

        /* do not reduce max size below current level if the single queue
         * has grown because of empty queue */
        if (new_size == 0) {
          q->max_size.visible = new_size;
        } else if (q->max_size.visible == 0) {
          q->max_size.visible = MAX (new_size, size.visible);
        } else if (new_size > size.visible) {
          q->max_size.visible = new_size;
        }
        update_buffering (mq, q);
        gst_data_queue_limits_changed (q->queue);
        tmp = g_list_next (tmp);
      }

      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);

      break;
    }
    case PROP_MAX_SIZE_TIME:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->max_size.time = g_value_get_uint64 (value);
#ifdef OHOS_EXT_FUNC
      // ohos.ext.func.0012
      /* OHOS: the reported buffering time tracks the time limit */
      mq->buffering_time = mq->max_size.time;
#endif
      SET_CHILD_PROPERTY (mq, time);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);
      break;
    case PROP_EXTRA_SIZE_BYTES:
      mq->extra_size.bytes = g_value_get_uint (value);
      break;
    case PROP_EXTRA_SIZE_BUFFERS:
      mq->extra_size.visible = g_value_get_uint (value);
      break;
    case PROP_EXTRA_SIZE_TIME:
      mq->extra_size.time = g_value_get_uint64 (value);
      break;
    case PROP_USE_BUFFERING:
      mq->use_buffering = g_value_get_boolean (value);
      recheck_buffering_status (mq);
      break;
    case PROP_LOW_PERCENT:
      mq->low_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
      /* Recheck buffering status - the new low_watermark value might
       * be above the current fill level. If the old low_watermark one
       * was below the current level, this means that mq->buffering is
       * disabled and needs to be re-enabled. */
      recheck_buffering_status (mq);
      break;
    case PROP_HIGH_PERCENT:
      mq->high_watermark = g_value_get_int (value) * BUF_LEVEL_PERCENT_FACTOR;
      recheck_buffering_status (mq);
      break;
    case PROP_LOW_WATERMARK:
      mq->low_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
      recheck_buffering_status (mq);
      break;
#ifdef OHOS_EXT_FUNC
    // ohos.ext.func.0013
    case PROP_MQ_NUM_ID:
      mq->mq_num_id = g_value_get_uint (value);
      break;
#endif
    case PROP_HIGH_WATERMARK:
      mq->high_watermark = g_value_get_double (value) * MAX_BUFFERING_LEVEL;
      recheck_buffering_status (mq);
      break;
    case PROP_SYNC_BY_RUNNING_TIME:
      mq->sync_by_running_time = g_value_get_boolean (value);
      break;
    case PROP_USE_INTERLEAVE:
      mq->use_interleave = g_value_get_boolean (value);
      break;
    case PROP_UNLINKED_CACHE_TIME:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->unlinked_cache_time = g_value_get_uint64 (value);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      gst_multi_queue_post_buffering (mq);
      break;
    case PROP_MINIMUM_INTERLEAVE:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      mq->min_interleave_time = g_value_get_uint64 (value);
      if (mq->use_interleave)
        calculate_interleave (mq, NULL);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
1063
/* Build the value of the "stats" property: a structure containing one
 * "queue_%d" child structure per single queue with its current buffer,
 * byte and time levels.
 * Called with mutex held */
static GstStructure *
gst_multi_queue_get_stats (GstMultiQueue * mq)
{
  GstStructure *ret =
      gst_structure_new_empty ("application/x-gst-multi-queue-stats");
  GList *tmp;
  GstSingleQueue *sq;

  if (mq->queues != NULL) {
    GValue queues = G_VALUE_INIT;
    GValue v = G_VALUE_INIT;

    g_value_init (&queues, GST_TYPE_ARRAY);

    for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
      GstDataQueueSize level;
      GstStructure *s;
      gchar *id;
      /* v is consumed by the array append below, so re-init each iteration */
      g_value_init (&v, GST_TYPE_STRUCTURE);

      sq = (GstSingleQueue *) tmp->data;
      gst_data_queue_get_level (sq->queue, &level);
      id = g_strdup_printf ("queue_%d", sq->id);
      s = gst_structure_new (id,
          "buffers", G_TYPE_UINT, level.visible,
          "bytes", G_TYPE_UINT, level.bytes,
          "time", G_TYPE_UINT64, sq->cur_time, NULL);
      g_value_take_boxed (&v, s);
      gst_value_array_append_and_take_value (&queues, &v);
      g_free (id);
    }
    gst_structure_take_value (ret, "queues", &queues);
  }

  return ret;
}
1101
/* GObject get_property implementation. All reads happen under the
 * multiqueue lock; watermarks are converted back from the internal
 * MAX_BUFFERING_LEVEL scale to the external percent/double range. */
static void
gst_multi_queue_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstMultiQueue *mq = GST_MULTI_QUEUE (object);

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  switch (prop_id) {
    case PROP_EXTRA_SIZE_BYTES:
      g_value_set_uint (value, mq->extra_size.bytes);
      break;
    case PROP_EXTRA_SIZE_BUFFERS:
      g_value_set_uint (value, mq->extra_size.visible);
      break;
    case PROP_EXTRA_SIZE_TIME:
      g_value_set_uint64 (value, mq->extra_size.time);
      break;
    case PROP_MAX_SIZE_BYTES:
      g_value_set_uint (value, mq->max_size.bytes);
      break;
    case PROP_MAX_SIZE_BUFFERS:
      g_value_set_uint (value, mq->max_size.visible);
      break;
    case PROP_MAX_SIZE_TIME:
      g_value_set_uint64 (value, mq->max_size.time);
      break;
    case PROP_USE_BUFFERING:
      g_value_set_boolean (value, mq->use_buffering);
      break;
    case PROP_LOW_PERCENT:
      g_value_set_int (value, mq->low_watermark / BUF_LEVEL_PERCENT_FACTOR);
      break;
    case PROP_HIGH_PERCENT:
      g_value_set_int (value, mq->high_watermark / BUF_LEVEL_PERCENT_FACTOR);
      break;
    case PROP_LOW_WATERMARK:
      g_value_set_double (value, mq->low_watermark /
          (gdouble) MAX_BUFFERING_LEVEL);
      break;
    case PROP_HIGH_WATERMARK:
      g_value_set_double (value, mq->high_watermark /
          (gdouble) MAX_BUFFERING_LEVEL);
      break;
    case PROP_SYNC_BY_RUNNING_TIME:
      g_value_set_boolean (value, mq->sync_by_running_time);
      break;
    case PROP_USE_INTERLEAVE:
      g_value_set_boolean (value, mq->use_interleave);
      break;
    case PROP_UNLINKED_CACHE_TIME:
      g_value_set_uint64 (value, mq->unlinked_cache_time);
      break;
    case PROP_MINIMUM_INTERLEAVE:
      g_value_set_uint64 (value, mq->min_interleave_time);
      break;
    case PROP_STATS:
      /* gst_multi_queue_get_stats() requires the lock we hold here */
      g_value_take_boxed (value, gst_multi_queue_get_stats (mq));
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
}
1168
/* GstPad iterate_internal_links implementation: returns a single-element
 * iterator holding the pad opposite @pad within its single queue (the
 * srcpad for a sinkpad and vice versa), or NULL when @pad has no single
 * queue or the counterpart pad is already gone. */
static GstIterator *
gst_multi_queue_iterate_internal_links (GstPad * pad, GstObject * parent)
{
  GstIterator *it = NULL;
  GstPad *opad, *sinkpad, *srcpad;
  GstSingleQueue *squeue;
  GstMultiQueue *mq = GST_MULTI_QUEUE (parent);
  GValue val = { 0, };

  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  squeue = GST_MULTIQUEUE_PAD (pad)->sq;
  if (!squeue)
    goto out;

  /* Resolve both weak refs; keep a ref on the opposite pad and drop the
   * other one. If neither side matches @pad, drop both and bail out. */
  srcpad = g_weak_ref_get (&squeue->srcpad);
  sinkpad = g_weak_ref_get (&squeue->sinkpad);
  if (sinkpad == pad && srcpad) {
    opad = srcpad;
    gst_clear_object (&sinkpad);

  } else if (srcpad == pad && sinkpad) {
    opad = sinkpad;
    gst_clear_object (&srcpad);

  } else {
    gst_clear_object (&srcpad);
    gst_clear_object (&sinkpad);
    goto out;
  }

  /* The iterator takes its own ref on the pad via the GValue */
  g_value_init (&val, GST_TYPE_PAD);
  g_value_set_object (&val, opad);
  it = gst_iterator_new_single (GST_TYPE_PAD, &val);
  g_value_unset (&val);

  gst_object_unref (opad);

out:
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  return it;
}
1211
1212
1213 /*
1214 * GstElement methods
1215 */
1216
1217 static GstPad *
gst_multi_queue_request_new_pad(GstElement * element,GstPadTemplate * temp,const gchar * name,const GstCaps * caps)1218 gst_multi_queue_request_new_pad (GstElement * element, GstPadTemplate * temp,
1219 const gchar * name, const GstCaps * caps)
1220 {
1221 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1222 GstSingleQueue *squeue;
1223 GstPad *new_pad;
1224 guint temp_id = -1;
1225
1226 if (name) {
1227 sscanf (name + 4, "_%u", &temp_id);
1228 GST_LOG_OBJECT (element, "name : %s (id %d)", GST_STR_NULL (name), temp_id);
1229 }
1230
1231 /* Create a new single queue, add the sink and source pad and return the sink pad */
1232 squeue = gst_single_queue_new (mqueue, temp_id);
1233
1234 new_pad = squeue ? g_weak_ref_get (&squeue->sinkpad) : NULL;
1235 /* request pad assumes the element is owning the ref of the pad it returns */
1236 if (new_pad)
1237 gst_object_unref (new_pad);
1238
1239 GST_DEBUG_OBJECT (mqueue, "Returning pad %" GST_PTR_FORMAT, new_pad);
1240
1241 return new_pad;
1242 }
1243
1244 static void
gst_multi_queue_release_pad(GstElement * element,GstPad * pad)1245 gst_multi_queue_release_pad (GstElement * element, GstPad * pad)
1246 {
1247 GstPad *sinkpad = NULL, *srcpad = NULL;
1248 GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
1249 GstSingleQueue *sq = NULL;
1250 GList *tmp;
1251
1252 GST_LOG_OBJECT (element, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
1253
1254 GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
1255 /* Find which single queue it belongs to, knowing that it should be a sinkpad */
1256 for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
1257 sq = (GstSingleQueue *) tmp->data;
1258 sinkpad = g_weak_ref_get (&sq->sinkpad);
1259
1260 if (sinkpad == pad) {
1261 srcpad = g_weak_ref_get (&sq->srcpad);
1262 break;
1263 }
1264
1265 gst_object_unref (sinkpad);
1266 }
1267
1268 if (!tmp) {
1269 gst_clear_object (&sinkpad);
1270 gst_clear_object (&srcpad);
1271 GST_WARNING_OBJECT (mqueue, "That pad doesn't belong to this element ???");
1272 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1273 return;
1274 }
1275
1276 /* FIXME: The removal of the singlequeue should probably not happen until it
1277 * finishes draining */
1278
1279 /* remove it from the list */
1280 mqueue->queues = g_list_delete_link (mqueue->queues, tmp);
1281 mqueue->queues_cookie++;
1282
1283 /* FIXME : recompute next-non-linked */
1284 GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
1285
1286 /* delete SingleQueue */
1287 gst_data_queue_set_flushing (sq->queue, TRUE);
1288
1289 gst_pad_set_active (srcpad, FALSE);
1290 gst_pad_set_active (sinkpad, FALSE);
1291 gst_element_remove_pad (element, srcpad);
1292 gst_element_remove_pad (element, sinkpad);
1293 gst_object_unref (srcpad);
1294 gst_object_unref (sinkpad);
1295 }
1296
/* GstElement change_state implementation: clears/sets the per-queue
 * flushing flag around READY<->PAUSED and wakes up any task blocked on a
 * cond so it can notice the flush, then chains up. */
static GstStateChangeReturn
gst_multi_queue_change_state (GstElement * element, GstStateChange transition)
{
  GstMultiQueue *mqueue = GST_MULTI_QUEUE (element);
  GstSingleQueue *sq = NULL;
  GstStateChangeReturn result;

  switch (transition) {
    case GST_STATE_CHANGE_READY_TO_PAUSED:{
      GList *tmp;

      /* Set all pads to non-flushing */
      GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
      for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
        sq = (GstSingleQueue *) tmp->data;
        sq->flushing = FALSE;
      }

      /* the visible limit might not have been set on single queues that have grown because other queues were empty */
      SET_CHILD_PROPERTY (mqueue, visible);

      GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
      gst_multi_queue_post_buffering (mqueue);

      break;
    }
    case GST_STATE_CHANGE_PAUSED_TO_READY:{
      GList *tmp;

      /* Un-wait all waiting pads */
      GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);
      for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
        sq = (GstSingleQueue *) tmp->data;
        sq->flushing = TRUE;
        g_cond_signal (&sq->turn);

        /* Fail any query that is still waiting for an answer */
        sq->last_query = FALSE;
        g_cond_signal (&sq->query_handled);
      }
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
      break;
    }
    default:
      break;
  }

  result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  switch (transition) {
    default:
      break;
  }

  return result;
}
1352
1353 static gboolean
gst_single_queue_start(GstMultiQueue * mq,GstSingleQueue * sq)1354 gst_single_queue_start (GstMultiQueue * mq, GstSingleQueue * sq)
1355 {
1356 gboolean res = FALSE;
1357 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1358
1359 GST_LOG_OBJECT (mq, "SingleQueue %d : starting task", sq->id);
1360
1361 if (srcpad) {
1362 res = gst_pad_start_task (srcpad,
1363 (GstTaskFunction) gst_multi_queue_loop, srcpad, NULL);
1364 gst_object_unref (srcpad);
1365 }
1366
1367 return res;
1368 }
1369
1370 static gboolean
gst_single_queue_pause(GstMultiQueue * mq,GstSingleQueue * sq)1371 gst_single_queue_pause (GstMultiQueue * mq, GstSingleQueue * sq)
1372 {
1373 gboolean result = FALSE;
1374 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1375
1376 GST_LOG_OBJECT (mq, "SingleQueue %d : pausing task", sq->id);
1377 if (srcpad) {
1378 result = gst_pad_pause_task (srcpad);
1379 gst_object_unref (srcpad);
1380 }
1381
1382 sq->sink_tainted = sq->src_tainted = TRUE;
1383 return result;
1384 }
1385
1386 static gboolean
gst_single_queue_stop(GstMultiQueue * mq,GstSingleQueue * sq)1387 gst_single_queue_stop (GstMultiQueue * mq, GstSingleQueue * sq)
1388 {
1389 gboolean result = FALSE;
1390 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
1391
1392 GST_LOG_OBJECT (mq, "SingleQueue %d : stopping task", sq->id);
1393 if (srcpad) {
1394 result = gst_pad_stop_task (srcpad);
1395 gst_object_unref (srcpad);
1396 }
1397 sq->sink_tainted = sq->src_tainted = TRUE;
1398 return result;
1399 }
1400
/* Put @sq into (@flush == TRUE) or take it out of (@flush == FALSE)
 * flushing mode. Starting a flush marks the queue flushing and wakes any
 * blocked task/query; stopping a flush drains the data queue (dropping
 * everything when @full) and resets all per-queue bookkeeping so dataflow
 * can restart cleanly. */
static void
gst_single_queue_flush (GstMultiQueue * mq, GstSingleQueue * sq, gboolean flush,
    gboolean full)
{
  GST_DEBUG_OBJECT (mq, "flush %s queue %d", (flush ? "start" : "stop"),
      sq->id);

  if (flush) {
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    sq->srcresult = GST_FLOW_FLUSHING;
    gst_data_queue_set_flushing (sq->queue, TRUE);

    sq->flushing = TRUE;

    /* wake up non-linked task */
    GST_LOG_OBJECT (mq, "SingleQueue %d : waking up eventually waiting task",
        sq->id);
    g_cond_signal (&sq->turn);
    /* Also fail any query still waiting for an answer */
    sq->last_query = FALSE;
    g_cond_signal (&sq->query_handled);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  } else {
    /* Drain first, outside the lock, then reset state under the lock */
    gst_single_queue_flush_queue (sq, full);

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
    gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);
    sq->has_src_segment = FALSE;
    /* All pads start off OK for a smooth kick-off */
    sq->srcresult = GST_FLOW_OK;
    sq->pushed = FALSE;
    sq->cur_time = 0;
    sq->max_size.visible = mq->max_size.visible;
    sq->is_eos = FALSE;
    sq->is_segment_done = FALSE;
    sq->nextid = 0;
    sq->oldid = 0;
    sq->last_oldid = G_MAXUINT32;
    sq->next_time = GST_CLOCK_STIME_NONE;
    sq->last_time = GST_CLOCK_STIME_NONE;
    sq->cached_sinktime = GST_CLOCK_STIME_NONE;
    sq->group_high_time = GST_CLOCK_STIME_NONE;
    gst_data_queue_set_flushing (sq->queue, FALSE);

    /* We will become active again on the next buffer/gap */
    sq->active = FALSE;

    /* Reset high time to be recomputed next */
    mq->high_time = GST_CLOCK_STIME_NONE;

    sq->flushing = FALSE;
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }
}
1455
/* Compute the fill level of @sq on a 0..MAX_BUFFERING_LEVEL scale as the
 * maximum of its byte-based and time-based levels. EOS, segment-done,
 * unlinked and sparse queues report completely full so they never hold the
 * pipeline in buffering.
 * WITH LOCK TAKEN */
static gint
get_buffering_level (GstMultiQueue * mq, GstSingleQueue * sq)
{
  GstDataQueueSize size;
  gint buffering_level, tmp;

  gst_data_queue_get_level (sq->queue, &size);

  GST_DEBUG_OBJECT (mq,
      "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
      G_GUINT64_FORMAT, sq->id, size.visible, sq->max_size.visible,
      size.bytes, sq->max_size.bytes, sq->cur_time, sq->max_size.time);

  /* get bytes and time buffer levels and take the max */
  if (sq->is_eos || sq->is_segment_done || sq->srcresult == GST_FLOW_NOT_LINKED
      || sq->is_sparse) {
    buffering_level = MAX_BUFFERING_LEVEL;
  } else {
    buffering_level = 0;
    if (sq->max_size.time > 0) {
      tmp =
          gst_util_uint64_scale (sq->cur_time,
          MAX_BUFFERING_LEVEL, sq->max_size.time);
      buffering_level = MAX (buffering_level, tmp);
    }
    if (sq->max_size.bytes > 0) {
      tmp =
          gst_util_uint64_scale_int (size.bytes,
          MAX_BUFFERING_LEVEL, sq->max_size.bytes);
      buffering_level = MAX (buffering_level, tmp);
    }
  }

  return buffering_level;
}
1492
#ifdef OHOS_EXT_FUNC
// ohos.ext.func.0012
/* OHOS: track the minimum queued time across all single queues (starting
 * from the max-size-time limit) and flag it for posting when it moved by
 * more than DEFAULT_INTERNAL_BUFFERING_TIME in either direction.
 * Called with the multiqueue lock taken (invoked from update_buffering). */
static void
update_buffering_time (GstMultiQueue * mq)
{
  GList *iter;
  GstClockTime buffering_time = mq->max_size.time;
  for (iter = mq->queues; iter; iter = g_list_next (iter)) {
    GstSingleQueue *sq = (GstSingleQueue *) iter->data;
    if (buffering_time > sq->cur_time) {
      buffering_time = sq->cur_time;
    }
  }

  /* NOTE(review): this prints the *previous* mq->buffering_time; the newly
   * computed value is the one labelled "cur_time" */
  GST_DEBUG_OBJECT (mq, "Going to post buffering: mq_num_id = %d, buffering_time = %" G_GUINT64_FORMAT" cur_time = %" G_GUINT64_FORMAT,
      mq->mq_num_id, mq->buffering_time, buffering_time);
  mq->buffering_time = buffering_time;
  /* Only schedule a message when the value changed significantly */
  if ((mq->buffering_time > (mq->last_buffering_time + DEFAULT_INTERNAL_BUFFERING_TIME)) ||
      (mq->last_buffering_time > (mq->buffering_time + DEFAULT_INTERNAL_BUFFERING_TIME))) {
    mq->buffering_time_changed = TRUE;
    mq->last_buffering_time = mq->buffering_time;
  }
}
#endif
1517
/* Re-evaluate the buffering state after the fill level of @sq changed,
 * updating mq->buffering and the reported percentage. The actual bus
 * message is posted later, outside the lock, by
 * gst_multi_queue_post_buffering().
 * WITH LOCK TAKEN */
static void
update_buffering (GstMultiQueue * mq, GstSingleQueue * sq)
{
  gint buffering_level, percent;

  /* nothing to do when we are not in buffering mode */
  if (!mq->use_buffering)
    return;

  buffering_level = get_buffering_level (mq, sq);

  /* scale so that if buffering_level equals the high watermark,
   * the percentage is 100% */
  percent = gst_util_uint64_scale (buffering_level, 100, mq->high_watermark);
  /* clip */
  if (percent > 100)
    percent = 100;

  if (mq->buffering) {
    /* Currently buffering: leave buffering mode once this queue reaches
     * the high watermark */
    if (buffering_level >= mq->high_watermark) {
      mq->buffering = FALSE;
    }
    /* make sure it increases */
    percent = MAX (mq->buffering_percent, percent);

    SET_PERCENT (mq, percent);
  } else {
    GList *iter;
    gboolean is_buffering = TRUE;

    /* Not buffering: never (re)enter buffering mode while any queue is
     * already at or above the high watermark */
    for (iter = mq->queues; iter; iter = g_list_next (iter)) {
      GstSingleQueue *oq = (GstSingleQueue *) iter->data;

      if (get_buffering_level (mq, oq) >= mq->high_watermark) {
        is_buffering = FALSE;

        break;
      }
    }

#ifdef OHOS_EXT_FUNC
    // ohos.ext.func.0012
    /* OHOS: also re-enter buffering when the reported percentage dropped
     * to 0, even if the level is above the low watermark */
    if (is_buffering && (buffering_level < mq->low_watermark || mq->buffering_percent == 0)) {
      mq->buffering = TRUE;
      SET_PERCENT (mq, percent);
    }
#else
    if (is_buffering && buffering_level < mq->low_watermark) {
      mq->buffering = TRUE;
      SET_PERCENT (mq, percent);
    }
#endif
  }

#ifdef OHOS_EXT_FUNC
  // ohos.ext.func.0012
  update_buffering_time(mq);
#endif
}
1578
/* Post any pending buffering (and, on OHOS, buffering-time) message on the
 * bus. The messages are built under the multiqueue lock but posted outside
 * it; buffering_post_lock serialises concurrent posters so percentages
 * cannot appear out of order on the bus. */
static void
gst_multi_queue_post_buffering (GstMultiQueue * mq)
{
  GstMessage *msg = NULL;

  g_mutex_lock (&mq->buffering_post_lock);
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (mq->buffering_percent_changed) {
    gint percent = mq->buffering_percent;

    mq->buffering_percent_changed = FALSE;

    GST_DEBUG_OBJECT (mq, "Going to post buffering: %d%%", percent);
    msg = gst_message_new_buffering (GST_OBJECT_CAST (mq), percent);
  }

#ifdef OHOS_EXT_FUNC
  // ohos.ext.func.0012
  GstMessage *msg_buffering_time = NULL;
  if (mq->buffering_time_changed) {
    gint64 buffering_time = mq->buffering_time;
    mq->buffering_time_changed = FALSE;
    GST_DEBUG_OBJECT (mq, "Going to post buffering time: %" G_GUINT64_FORMAT, buffering_time);
    msg_buffering_time = gst_message_new_buffering_time (GST_OBJECT_CAST (mq), buffering_time, mq->mq_num_id);
  }
#endif
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Post outside the multiqueue lock to avoid deadlocks with bus handlers */
  if (msg != NULL)
    gst_element_post_message (GST_ELEMENT_CAST (mq), msg);

#ifdef OHOS_EXT_FUNC
  // ohos.ext.func.0012
  if (msg_buffering_time != NULL) {
    gst_element_post_message (GST_ELEMENT_CAST (mq), msg_buffering_time);
  }
#endif
  g_mutex_unlock (&mq->buffering_post_lock);
}
1618
/* Force a full recomputation of the buffering state, used after one of the
 * buffering-related properties changed: leaves buffering mode when
 * use-buffering was just disabled, otherwise re-runs update_buffering() on
 * every single queue and posts the (possibly changed) percentage. */
static void
recheck_buffering_status (GstMultiQueue * mq)
{
  if (!mq->use_buffering && mq->buffering) {
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    mq->buffering = FALSE;
    GST_DEBUG_OBJECT (mq,
        "Buffering property disabled, but queue was still buffering; "
        "setting buffering percentage to 100%%");
    SET_PERCENT (mq, 100);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }

  if (mq->use_buffering) {
    GList *tmp;
    gint old_perc;

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);

    /* force buffering percentage to be recalculated */
    old_perc = mq->buffering_percent;
    mq->buffering_percent = 0;

    tmp = mq->queues;
    while (tmp) {
      GstSingleQueue *q = (GstSingleQueue *) tmp->data;
      update_buffering (mq, q);
      gst_data_queue_limits_changed (q->queue);
      tmp = g_list_next (tmp);
    }

    GST_DEBUG_OBJECT (mq,
        "Recalculated buffering percentage: old: %d%% new: %d%%",
        old_perc, mq->buffering_percent);

    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }

  /* Post any resulting change outside the lock */
  gst_multi_queue_post_buffering (mq);
}
1659
/* (Re)compute mq->interleave — the maximum running-time distance tolerated
 * between streams — from the cached sink times of all non-sparse queues,
 * and mirror it into max-size-time. @sq is the queue that triggered the
 * update and may be NULL (e.g. when only min-interleave changed).
 * Called with the multiqueue lock taken. */
static void
calculate_interleave (GstMultiQueue * mq, GstSingleQueue * sq)
{
  GstClockTimeDiff low, high;
  GstClockTime interleave, other_interleave = 0;
  GList *tmp;

  low = high = GST_CLOCK_STIME_NONE;
  interleave = mq->interleave;
  /* Go over all single queues and calculate lowest/highest value */
  for (tmp = mq->queues; tmp; tmp = tmp->next) {
    GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
    /* Ignore sparse streams for interleave calculation */
    if (oq->is_sparse)
      continue;
    /* If a stream is not active yet (hasn't received any buffers), set
     * a maximum interleave to allow it to receive more data */
    if (!oq->active) {
      GST_LOG_OBJECT (mq,
          "queue %d is not active yet, forcing interleave to 5s", oq->id);
      mq->interleave = 5 * GST_SECOND;
      /* Update max-size time */
      mq->max_size.time = mq->interleave;
      SET_CHILD_PROPERTY (mq, time);
      goto beach;
    }

    /* Calculate within each streaming thread */
    if (sq && sq->thread != oq->thread) {
      if (oq->interleave > other_interleave)
        other_interleave = oq->interleave;
      continue;
    }

    if (GST_CLOCK_STIME_IS_VALID (oq->cached_sinktime)) {
      if (low == GST_CLOCK_STIME_NONE || oq->cached_sinktime < low)
        low = oq->cached_sinktime;
      if (high == GST_CLOCK_STIME_NONE || oq->cached_sinktime > high)
        high = oq->cached_sinktime;
    }
    GST_LOG_OBJECT (mq,
        "queue %d , sinktime:%" GST_STIME_FORMAT " low:%" GST_STIME_FORMAT
        " high:%" GST_STIME_FORMAT, oq->id,
        GST_STIME_ARGS (oq->cached_sinktime), GST_STIME_ARGS (low),
        GST_STIME_ARGS (high));
  }

  if (GST_CLOCK_STIME_IS_VALID (low) && GST_CLOCK_STIME_IS_VALID (high)) {
    interleave = high - low;
    /* Padding of interleave and minimum value */
    interleave = (150 * interleave / 100) + mq->min_interleave_time;
    if (sq)
      sq->interleave = interleave;

    /* Never report less than what other streaming threads require */
    interleave = MAX (interleave, other_interleave);

    /* Update the stored interleave if:
     * * No data has arrived yet (high == low)
     * * Or it went higher
     * * Or it went lower and we've gone past the previous interleave needed */
    if (high == low || interleave > mq->interleave ||
        ((mq->last_interleave_update + (2 * MIN (GST_SECOND,
                        mq->interleave)) < low)
            && interleave < (mq->interleave * 3 / 4))) {
      /* Update the interleave */
      mq->interleave = interleave;
      mq->last_interleave_update = high;
      /* Update max-size time */
      mq->max_size.time = mq->interleave;
      SET_CHILD_PROPERTY (mq, time);
    }
  }

beach:
  GST_DEBUG_OBJECT (mq,
      "low:%" GST_STIME_FORMAT " high:%" GST_STIME_FORMAT " interleave:%"
      GST_TIME_FORMAT " mq->interleave:%" GST_TIME_FORMAT
      " last_interleave_update:%" GST_STIME_FORMAT, GST_STIME_ARGS (low),
      GST_STIME_ARGS (high), GST_TIME_ARGS (interleave),
      GST_TIME_ARGS (mq->interleave),
      GST_STIME_ARGS (mq->last_interleave_update));
}
1742
1743
/* calculate the diff between running time on the sink and src of the queue.
 * This is the total amount of time in the queue.
 * WITH LOCK TAKEN */
static void
update_time_level (GstMultiQueue * mq, GstSingleQueue * sq)
{
  GstClockTimeDiff sink_time, src_time;

  /* Recompute the sink-side running time only when tainted (i.e. the sink
   * segment/position changed since the last computation) */
  if (sq->sink_tainted) {
    sink_time = sq->sinktime = my_segment_to_running_time (&sq->sink_segment,
        sq->sink_segment.position);

    GST_DEBUG_OBJECT (mq,
        "queue %d sink_segment.position:%" GST_TIME_FORMAT ", sink_time:%"
        GST_STIME_FORMAT, sq->id, GST_TIME_ARGS (sq->sink_segment.position),
        GST_STIME_ARGS (sink_time));

    if (G_UNLIKELY (sq->last_time == GST_CLOCK_STIME_NONE)) {
      /* If the single queue still doesn't have a last_time set, this means
       * that nothing has been pushed out yet.
       * In order for the high_time computation to be as efficient as possible,
       * we set the last_time */
      sq->last_time = sink_time;
    }
    if (G_UNLIKELY (sink_time != GST_CLOCK_STIME_NONE)) {
      /* if we have a time, we become untainted and use the time */
      sq->sink_tainted = FALSE;
      if (mq->use_interleave) {
        sq->cached_sinktime = sink_time;
        calculate_interleave (mq, sq);
      }
    }
  } else
    sink_time = sq->sinktime;

  if (sq->src_tainted) {
    GstSegment *segment;
    gint64 position;

    if (sq->has_src_segment) {
      segment = &sq->src_segment;
      position = sq->src_segment.position;
    } else {
      /*
       * If the src pad had no segment yet, use the sink segment
       * to avoid signalling overrun if the received sink segment has a
       * position > max-size-time while the src pad time would be the default=0
       *
       * This can happen when switching pads on chained/adaptive streams and the
       * new chain has a segment with a much larger position
       */
      segment = &sq->sink_segment;
      position = sq->sink_segment.position;
    }

    src_time = sq->srctime = my_segment_to_running_time (segment, position);
    /* if we have a time, we become untainted and use the time */
    if (G_UNLIKELY (src_time != GST_CLOCK_STIME_NONE)) {
      sq->src_tainted = FALSE;
    }
  } else
    src_time = sq->srctime;

  GST_DEBUG_OBJECT (mq,
      "queue %d, sink %" GST_STIME_FORMAT ", src %" GST_STIME_FORMAT, sq->id,
      GST_STIME_ARGS (sink_time), GST_STIME_ARGS (src_time));

  /* This allows for streams with out of order timestamping - sometimes the
   * emerging timestamp is later than the arriving one(s) */
  if (G_LIKELY (GST_CLOCK_STIME_IS_VALID (sink_time) &&
          GST_CLOCK_STIME_IS_VALID (src_time) && sink_time > src_time))
    sq->cur_time = sink_time - src_time;
  else
    sq->cur_time = 0;

  /* updating the time level can change the buffering state */
  update_buffering (mq, sq);

  return;
}
1824
/* take a SEGMENT event and apply the values to segment, updating the time
 * level of queue. */
static void
apply_segment (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
    GstSegment * segment)
{
  gst_event_copy_segment (event, segment);

  /* now configure the values, we use these to track timestamps on the
   * sinkpad. */
  if (segment->format != GST_FORMAT_TIME) {
    /* non-time format, pretend the current time segment is closed with a
     * 0 start and unknown stop time. */
    segment->format = GST_FORMAT_TIME;
    segment->start = 0;
    segment->stop = -1;
    segment->time = 0;
  }
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  /* Make sure we have a valid initial segment position (and not garbage
   * from upstream) */
  if (segment->rate > 0.0)
    segment->position = segment->start;
  else
    segment->position = segment->stop;
  /* Mark the corresponding side tainted so update_time_level() recomputes it */
  if (segment == &sq->sink_segment)
    sq->sink_tainted = TRUE;
  else {
    sq->has_src_segment = TRUE;
    sq->src_tainted = TRUE;
  }

  GST_DEBUG_OBJECT (mq,
      "queue %d, configured SEGMENT %" GST_SEGMENT_FORMAT, sq->id, segment);

  /* segment can update the time level of the queue */
  update_time_level (mq, sq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_multi_queue_post_buffering (mq);
}
1867
1868 /* take a buffer and update segment, updating the time level of the queue. */
1869 static void
apply_buffer(GstMultiQueue * mq,GstSingleQueue * sq,GstClockTime timestamp,GstClockTime duration,GstSegment * segment)1870 apply_buffer (GstMultiQueue * mq, GstSingleQueue * sq, GstClockTime timestamp,
1871 GstClockTime duration, GstSegment * segment)
1872 {
1873 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1874
1875 /* if no timestamp is set, assume it's continuous with the previous
1876 * time */
1877 if (timestamp == GST_CLOCK_TIME_NONE)
1878 timestamp = segment->position;
1879
1880 /* add duration */
1881 if (duration != GST_CLOCK_TIME_NONE)
1882 timestamp += duration;
1883
1884 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1885 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1886 GST_TIME_ARGS (timestamp));
1887
1888 segment->position = timestamp;
1889
1890 if (segment == &sq->sink_segment)
1891 sq->sink_tainted = TRUE;
1892 else
1893 sq->src_tainted = TRUE;
1894
1895 /* calc diff with other end */
1896 update_time_level (mq, sq);
1897 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1898 gst_multi_queue_post_buffering (mq);
1899 }
1900
1901 static void
apply_gap(GstMultiQueue * mq,GstSingleQueue * sq,GstEvent * event,GstSegment * segment)1902 apply_gap (GstMultiQueue * mq, GstSingleQueue * sq, GstEvent * event,
1903 GstSegment * segment)
1904 {
1905 GstClockTime timestamp;
1906 GstClockTime duration;
1907
1908 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
1909
1910 gst_event_parse_gap (event, ×tamp, &duration);
1911
1912 if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
1913
1914 if (GST_CLOCK_TIME_IS_VALID (duration)) {
1915 timestamp += duration;
1916 }
1917
1918 GST_DEBUG_OBJECT (mq, "queue %d, %s position updated to %" GST_TIME_FORMAT,
1919 sq->id, segment == &sq->sink_segment ? "sink" : "src",
1920 GST_TIME_ARGS (timestamp));
1921
1922 segment->position = timestamp;
1923
1924 if (segment == &sq->sink_segment)
1925 sq->sink_tainted = TRUE;
1926 else
1927 sq->src_tainted = TRUE;
1928
1929 /* calc diff with other end */
1930 update_time_level (mq, sq);
1931 }
1932
1933 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
1934 gst_multi_queue_post_buffering (mq);
1935 }
1936
1937 static GstClockTimeDiff
get_running_time(GstSegment * segment,GstMiniObject * object,gboolean end)1938 get_running_time (GstSegment * segment, GstMiniObject * object, gboolean end)
1939 {
1940 GstClockTimeDiff time = GST_CLOCK_STIME_NONE;
1941
1942 if (GST_IS_BUFFER (object)) {
1943 GstBuffer *buf = GST_BUFFER_CAST (object);
1944 GstClockTime btime = GST_BUFFER_DTS_OR_PTS (buf);
1945
1946 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1947 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1948 btime += GST_BUFFER_DURATION (buf);
1949 time = my_segment_to_running_time (segment, btime);
1950 }
1951 } else if (GST_IS_BUFFER_LIST (object)) {
1952 GstBufferList *list = GST_BUFFER_LIST_CAST (object);
1953 gint i, n;
1954 GstBuffer *buf;
1955
1956 n = gst_buffer_list_length (list);
1957 for (i = 0; i < n; i++) {
1958 GstClockTime btime;
1959 buf = gst_buffer_list_get (list, i);
1960 btime = GST_BUFFER_DTS_OR_PTS (buf);
1961 if (GST_CLOCK_TIME_IS_VALID (btime)) {
1962 if (end && GST_BUFFER_DURATION_IS_VALID (buf))
1963 btime += GST_BUFFER_DURATION (buf);
1964 time = my_segment_to_running_time (segment, btime);
1965 if (!end)
1966 goto done;
1967 } else if (!end) {
1968 goto done;
1969 }
1970 }
1971 } else if (GST_IS_EVENT (object)) {
1972 GstEvent *event = GST_EVENT_CAST (object);
1973
1974 /* For newsegment events return the running time of the start position */
1975 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
1976 const GstSegment *new_segment;
1977
1978 gst_event_parse_segment (event, &new_segment);
1979 if (new_segment->format == GST_FORMAT_TIME) {
1980 time =
1981 my_segment_to_running_time ((GstSegment *) new_segment,
1982 new_segment->start);
1983 }
1984 } else if (GST_EVENT_TYPE (event) == GST_EVENT_GAP) {
1985 GstClockTime ts, dur;
1986 gst_event_parse_gap (event, &ts, &dur);
1987 if (GST_CLOCK_TIME_IS_VALID (ts)) {
1988 if (GST_CLOCK_TIME_IS_VALID (dur))
1989 ts += dur;
1990 time = my_segment_to_running_time (segment, ts);
1991 }
1992 }
1993 }
1994
1995 done:
1996 return time;
1997 }
1998
/* Pushes one popped object (buffer, event or query) downstream on sq's
 * srcpad. Takes ownership of @object. *allow_drop tracks the "dropping
 * until queue drains after EOS" state driven by gst_multi_queue_loop();
 * while TRUE, buffers/events/queries are dropped instead of pushed, and
 * STREAM_START / EOS / SEGMENT / SEGMENT_DONE events clear the flag.
 * Returns the downstream flow return (or sq->srcresult when nothing was
 * actually pushed). */
static GstFlowReturn
gst_single_queue_push_one (GstMultiQueue * mq, GstSingleQueue * sq,
    GstMiniObject * object, gboolean * allow_drop)
{
  GstFlowReturn result = sq->srcresult;
  GstPad *srcpad = g_weak_ref_get (&sq->srcpad);

  if (!srcpad) {
    /* srcpad already released; nothing to push to */
    GST_INFO_OBJECT (mq,
        "Pushing while corresponding sourcepad has been cleared");
    return GST_FLOW_FLUSHING;
  }

  if (GST_IS_BUFFER (object)) {
    GstBuffer *buffer;
    GstClockTime timestamp, duration;

    buffer = GST_BUFFER_CAST (object);
    timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
    duration = GST_BUFFER_DURATION (buffer);

    apply_buffer (mq, sq, timestamp, duration, &sq->src_segment);

    /* Applying the buffer may have made the queue non-full again, unblock it if needed */
    gst_data_queue_limits_changed (sq->queue);

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS buffer %p with ts %" GST_TIME_FORMAT,
          sq->id, buffer, GST_TIME_ARGS (timestamp));
      gst_buffer_unref (buffer);
    } else {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Pushing buffer %p with ts %" GST_TIME_FORMAT,
          sq->id, buffer, GST_TIME_ARGS (timestamp));
      /* gst_pad_push() takes ownership of the buffer */
      result = gst_pad_push (srcpad, buffer);
    }
  } else if (GST_IS_EVENT (object)) {
    GstEvent *event;

    event = GST_EVENT_CAST (object);

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_SEGMENT_DONE:
        *allow_drop = FALSE;
        break;
      case GST_EVENT_EOS:
        result = GST_FLOW_EOS;
        if (G_UNLIKELY (*allow_drop))
          *allow_drop = FALSE;
        break;
      case GST_EVENT_STREAM_START:
        result = GST_FLOW_OK;
        if (G_UNLIKELY (*allow_drop))
          *allow_drop = FALSE;
        break;
      case GST_EVENT_SEGMENT:
        apply_segment (mq, sq, event, &sq->src_segment);
        /* Applying the segment may have made the queue non-full again, unblock it if needed */
        gst_data_queue_limits_changed (sq->queue);
        if (G_UNLIKELY (*allow_drop)) {
          result = GST_FLOW_OK;
          *allow_drop = FALSE;
        }
        break;
      case GST_EVENT_GAP:
        apply_gap (mq, sq, event, &sq->src_segment);
        /* Applying the gap may have made the queue non-full again, unblock it if needed */
        gst_data_queue_limits_changed (sq->queue);
        break;
      default:
        break;
    }

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS event %p of type %s",
          sq->id, event, GST_EVENT_TYPE_NAME (event));
      gst_event_unref (event);
    } else {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Pushing event %p of type %s",
          sq->id, event, GST_EVENT_TYPE_NAME (event));

      gst_pad_push_event (srcpad, event);
    }
  } else if (GST_IS_QUERY (object)) {
    GstQuery *query;
    gboolean res;

    query = GST_QUERY_CAST (object);

    if (G_UNLIKELY (*allow_drop)) {
      GST_DEBUG_OBJECT (mq,
          "SingleQueue %d : Dropping EOS query %p", sq->id, query);
      gst_query_unref (query);
      res = FALSE;
    } else {
      res = gst_pad_peer_query (srcpad, query);
    }

    /* Hand the query result back to the sinkpad thread blocked in
     * gst_multi_queue_sink_query() */
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    sq->last_query = res;
    sq->last_handled_query = query;
    g_cond_signal (&sq->query_handled);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  } else {
    g_warning ("Unexpected object in singlequeue %u (refcounting problem?)",
        sq->id);
  }

  gst_object_unref (srcpad);
  return result;

  /* ERRORS */
}
2115
2116 static GstMiniObject *
gst_multi_queue_item_steal_object(GstMultiQueueItem * item)2117 gst_multi_queue_item_steal_object (GstMultiQueueItem * item)
2118 {
2119 GstMiniObject *res;
2120
2121 res = item->object;
2122 item->object = NULL;
2123
2124 return res;
2125 }
2126
2127 static void
gst_multi_queue_item_destroy(GstMultiQueueItem * item)2128 gst_multi_queue_item_destroy (GstMultiQueueItem * item)
2129 {
2130 if (!item->is_query && item->object)
2131 gst_mini_object_unref (item->object);
2132 g_slice_free (GstMultiQueueItem, item);
2133 }
2134
2135 /* takes ownership of passed mini object! */
2136 static GstMultiQueueItem *
gst_multi_queue_buffer_item_new(GstMiniObject * object,guint32 curid)2137 gst_multi_queue_buffer_item_new (GstMiniObject * object, guint32 curid)
2138 {
2139 GstMultiQueueItem *item;
2140
2141 item = g_slice_new (GstMultiQueueItem);
2142 item->object = object;
2143 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2144 item->posid = curid;
2145 item->is_query = GST_IS_QUERY (object);
2146
2147 item->size = gst_buffer_get_size (GST_BUFFER_CAST (object));
2148 item->duration = GST_BUFFER_DURATION (object);
2149 if (item->duration == GST_CLOCK_TIME_NONE)
2150 item->duration = 0;
2151 item->visible = TRUE;
2152 return item;
2153 }
2154
2155 static GstMultiQueueItem *
gst_multi_queue_mo_item_new(GstMiniObject * object,guint32 curid)2156 gst_multi_queue_mo_item_new (GstMiniObject * object, guint32 curid)
2157 {
2158 GstMultiQueueItem *item;
2159
2160 item = g_slice_new (GstMultiQueueItem);
2161 item->object = object;
2162 item->destroy = (GDestroyNotify) gst_multi_queue_item_destroy;
2163 item->posid = curid;
2164 item->is_query = GST_IS_QUERY (object);
2165
2166 item->size = 0;
2167 item->duration = 0;
2168 item->visible = FALSE;
2169 return item;
2170 }
2171
2172 /* Each main loop attempts to push buffers until the return value
2173 * is not-linked. not-linked pads are not allowed to push data beyond
2174 * any linked pads, so they don't 'rush ahead of the pack'.
2175 */
/* Streaming-thread task of one singlequeue's srcpad: pops items and pushes
 * them downstream, sleeping on sq->turn while not-linked and ahead of the
 * other streams (by id, or by running time when sync-by-running-time is
 * enabled). Also drives the post-EOS "dropping" state and pauses the task on
 * fatal flow returns. */
static void
gst_multi_queue_loop (GstPad * pad)
{
  GstSingleQueue *sq;
  GstMultiQueueItem *item;
  GstDataQueueItem *sitem;
  GstMultiQueue *mq;
  GstMiniObject *object = NULL;
  guint32 newid;
  GstFlowReturn result;
  GstClockTimeDiff next_time;
  gboolean is_buffer;
  gboolean is_query = FALSE;
  gboolean do_update_buffering = FALSE;
  gboolean dropping = FALSE;
  GstPad *srcpad = NULL;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = g_weak_ref_get (&sq->mqueue);
  srcpad = g_weak_ref_get (&sq->srcpad);

  /* Bail out if the element or the srcpad went away under us */
  if (!mq || !srcpad)
    goto done;

next:
  GST_DEBUG_OBJECT (mq, "SingleQueue %d : trying to pop an object", sq->id);

  if (sq->flushing)
    goto out_flushing;

  /* Get something from the queue, blocking until that happens, or we get
   * flushed */
  if (!(gst_data_queue_pop (sq->queue, &sitem)))
    goto out_flushing;

  item = (GstMultiQueueItem *) sitem;
  newid = item->posid;

  is_query = item->is_query;

  /* steal the object and destroy the item */
  object = gst_multi_queue_item_steal_object (item);
  gst_multi_queue_item_destroy (item);

  is_buffer = GST_IS_BUFFER (object);

  /* Get running time of the item. Events will have GST_CLOCK_STIME_NONE */
  next_time = get_running_time (&sq->src_segment, object, FALSE);

  GST_LOG_OBJECT (mq, "SingleQueue %d : newid:%d , oldid:%d",
      sq->id, newid, sq->last_oldid);

  /* If we're not-linked, we do some extra work because we might need to
   * wait before pushing. If we're linked but there's a gap in the IDs,
   * or it's the first loop, or we just passed the previous highid,
   * we might need to wake some sleeping pad up, so there's extra work
   * there too */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->srcresult == GST_FLOW_NOT_LINKED
      || (sq->last_oldid == G_MAXUINT32) || (newid != (sq->last_oldid + 1))
      || sq->last_oldid > mq->highid) {
    GST_LOG_OBJECT (mq, "CHECKING sq->srcresult: %s",
        gst_flow_get_name (sq->srcresult));

    /* Check again if we're flushing after the lock is taken,
     * the flush flag might have been changed in the meantime */
    if (sq->flushing) {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      goto out_flushing;
    }

    /* Update the nextid so other threads know when to wake us up */
    sq->nextid = newid;
    /* Take into account the extra cache time since we're unlinked */
    if (GST_CLOCK_STIME_IS_VALID (next_time))
      next_time += mq->unlinked_cache_time;
    sq->next_time = next_time;

    /* Update the oldid (the last ID we output) for highid tracking */
    if (sq->last_oldid != G_MAXUINT32)
      sq->oldid = sq->last_oldid;

    if (sq->srcresult == GST_FLOW_NOT_LINKED) {
      gboolean should_wait;
      /* Go to sleep until it's time to push this buffer */

      /* Recompute the highid */
      compute_high_id (mq);
      /* Recompute the high time */
      compute_high_time (mq, sq->groupid);

      GST_DEBUG_OBJECT (mq,
          "groupid %d high_time %" GST_STIME_FORMAT " next_time %"
          GST_STIME_FORMAT, sq->groupid, GST_STIME_ARGS (sq->group_high_time),
          GST_STIME_ARGS (next_time));

      /* Wait while this item's running time (or id) is still ahead of the
       * furthest linked stream */
      if (mq->sync_by_running_time) {
        if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              (mq->high_time == GST_CLOCK_STIME_NONE
              || next_time > mq->high_time);
        } else {
          should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
              next_time > sq->group_high_time;
        }
      } else
        should_wait = newid > mq->highid;

      while (should_wait && sq->srcresult == GST_FLOW_NOT_LINKED) {

        GST_DEBUG_OBJECT (mq,
            "queue %d sleeping for not-linked wakeup with "
            "newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT, sq->id, newid, mq->highid,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time));

        /* Wake up all non-linked pads before we sleep */
        wake_up_next_non_linked (mq);

        mq->numwaiting++;
        g_cond_wait (&sq->turn, &mq->qlock);
        mq->numwaiting--;

        if (sq->flushing) {
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          goto out_flushing;
        }

        /* Recompute the high time and ID */
        compute_high_time (mq, sq->groupid);
        compute_high_id (mq);

        GST_DEBUG_OBJECT (mq, "queue %d woken from sleeping for not-linked "
            "wakeup with newid %u, highid %u, next_time %" GST_STIME_FORMAT
            ", high_time %" GST_STIME_FORMAT " mq high_time %" GST_STIME_FORMAT,
            sq->id, newid, mq->highid,
            GST_STIME_ARGS (next_time), GST_STIME_ARGS (sq->group_high_time),
            GST_STIME_ARGS (mq->high_time));

        /* Re-evaluate the wait condition after every wakeup (spurious
         * wakeups and concurrent progress by other queues are possible) */
        if (mq->sync_by_running_time) {
          if (sq->group_high_time == GST_CLOCK_STIME_NONE) {
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                (mq->high_time == GST_CLOCK_STIME_NONE
                || next_time > mq->high_time);
          } else {
            should_wait = GST_CLOCK_STIME_IS_VALID (next_time) &&
                next_time > sq->group_high_time;
          }
        } else
          should_wait = newid > mq->highid;
      }

      /* Re-compute the high_id in case someone else pushed */
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
    } else {
      compute_high_id (mq);
      compute_high_time (mq, sq->groupid);
      /* Wake up all non-linked pads */
      wake_up_next_non_linked (mq);
    }
    /* We're done waiting, we can clear the nextid and nexttime */
    sq->nextid = 0;
    sq->next_time = GST_CLOCK_STIME_NONE;
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  if (sq->flushing)
    goto out_flushing;

  GST_LOG_OBJECT (mq, "sq:%d BEFORE PUSHING sq->srcresult: %s", sq->id,
      gst_flow_get_name (sq->srcresult));

  /* Update time stats */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  /* This time use end=TRUE: track the end running time of the item */
  next_time = get_running_time (&sq->src_segment, object, TRUE);
  if (GST_CLOCK_STIME_IS_VALID (next_time)) {
    if (sq->last_time == GST_CLOCK_STIME_NONE || sq->last_time < next_time)
      sq->last_time = next_time;
    if (mq->high_time == GST_CLOCK_STIME_NONE || mq->high_time <= next_time) {
      /* Wake up all non-linked pads now that we advanced the high time */
      mq->high_time = next_time;
      wake_up_next_non_linked (mq);
    }
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* Try to push out the new object */
  result = gst_single_queue_push_one (mq, sq, object, &dropping);
  /* push_one consumed the object in all cases */
  object = NULL;

  /* Check if we pushed something already and if this is
   * now a switch from an active to a non-active stream.
   *
   * If it is, we reset all the waiting streams, let them
   * push another buffer to see if they're now active again.
   * This allows faster switching between streams and prevents
   * deadlocks if downstream does any waiting too.
   */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (sq->pushed && sq->srcresult == GST_FLOW_OK
      && result == GST_FLOW_NOT_LINKED) {
    GList *tmp;

    GST_LOG_OBJECT (mq, "SingleQueue %d : Changed from active to non-active",
        sq->id);

    compute_high_id (mq);
    compute_high_time (mq, sq->groupid);
    do_update_buffering = TRUE;

    /* maybe no-one is waiting */
    if (mq->numwaiting > 0) {
      /* Else figure out which singlequeue(s) need waking up */
      for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
        GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;

        if (sq2->srcresult == GST_FLOW_NOT_LINKED) {
          GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq2->id);
          sq2->pushed = FALSE;
          sq2->srcresult = GST_FLOW_OK;
          g_cond_signal (&sq2->turn);
        }
      }
    }
  }

  if (is_buffer)
    sq->pushed = TRUE;

  /* now hold on a bit;
   * can not simply throw this result to upstream, because
   * that might already be onto another segment, so we have to make
   * sure we are relaying the correct info wrt proper segment */
  if (result == GST_FLOW_EOS && !dropping &&
      sq->srcresult != GST_FLOW_NOT_LINKED) {
    GST_DEBUG_OBJECT (mq, "starting EOS drop on sq %d", sq->id);
    dropping = TRUE;
    /* pretend we have not seen EOS yet for upstream's sake */
    result = sq->srcresult;
  } else if (dropping && gst_data_queue_is_empty (sq->queue)) {
    /* queue empty, so stop dropping
     * we can commit the result we have now,
     * which is either OK after a segment, or EOS */
    GST_DEBUG_OBJECT (mq, "committed EOS drop on sq %d", sq->id);
    dropping = FALSE;
    result = GST_FLOW_EOS;
  }
  sq->srcresult = result;
  sq->last_oldid = newid;

  if (do_update_buffering)
    update_buffering (mq, sq);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  gst_multi_queue_post_buffering (mq);

  GST_LOG_OBJECT (mq, "sq:%d AFTER PUSHING sq->srcresult: %s (is_eos:%d)",
      sq->id, gst_flow_get_name (sq->srcresult), GST_PAD_IS_EOS (srcpad));

  /* Need to make sure wake up any sleeping pads when we exit */
  GST_MULTI_QUEUE_MUTEX_LOCK (mq);
  if (mq->numwaiting > 0 && (GST_PAD_IS_EOS (srcpad)
          || sq->srcresult == GST_FLOW_EOS)) {
    compute_high_time (mq, sq->groupid);
    compute_high_id (mq);
    wake_up_next_non_linked (mq);
  }
  GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);

  /* While dropping, keep popping/discarding items without leaving the task */
  if (dropping)
    goto next;

  if (result != GST_FLOW_OK && result != GST_FLOW_NOT_LINKED
      && result != GST_FLOW_EOS)
    goto out_flushing;

done:
  gst_clear_object (&mq);
  gst_clear_object (&srcpad);

  return;

out_flushing:
  {
    /* A query object is still owned by the sinkpad thread, don't unref it */
    if (object && !is_query)
      gst_mini_object_unref (object);

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    sq->last_query = FALSE;
    g_cond_signal (&sq->query_handled);

    /* Post an error message if we got EOS while downstream
     * has returned an error flow return. After EOS there
     * will be no further buffer which could propagate the
     * error upstream */
    if ((sq->is_eos || sq->is_segment_done) && sq->srcresult < GST_FLOW_EOS) {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
    } else {
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    }

    /* upstream needs to see fatal result ASAP to shut things down,
     * but might be stuck in one of our other full queues;
     * so empty this one and trigger dynamic queue growth. At
     * this point the srcresult is not OK, NOT_LINKED
     * or EOS, i.e. a real failure */
    gst_single_queue_flush_queue (sq, FALSE);
    single_queue_underrun_cb (sq->queue, sq);
    gst_data_queue_set_flushing (sq->queue, TRUE);
    gst_pad_pause_task (srcpad);
    GST_CAT_LOG_OBJECT (multi_queue_debug, mq,
        "SingleQueue[%d] task paused, reason:%s",
        sq->id, gst_flow_get_name (sq->srcresult));
    goto done;
  }
}
2494
2495 /**
2496 * gst_multi_queue_chain:
2497 *
2498 * This is similar to GstQueue's chain function, except:
2499 * _ we don't have leak behaviours,
2500 * _ we push with a unique id (curid)
2501 */
/* Sinkpad chain function: wraps @buffer in an item with a unique increasing
 * id and pushes it into the singlequeue (blocking while the queue is full).
 * Takes ownership of @buffer. Returns sq->srcresult so that downstream
 * failures propagate upstream, or GST_FLOW_EOS when already EOS. */
static GstFlowReturn
gst_multi_queue_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstSingleQueue *sq;
  GstMultiQueue *mq;
  GstMultiQueueItem *item = NULL;
  guint32 curid;
  GstClockTime timestamp, duration;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = g_weak_ref_get (&sq->mqueue);

  /* The multiqueue may already have been disposed */
  if (!mq)
    goto done;

  /* if eos, we are always full, so avoid hanging incoming indefinitely */
  if (sq->is_eos)
    goto was_eos;

  sq->active = TRUE;

  /* Get a unique incrementing id */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  timestamp = GST_BUFFER_DTS_OR_PTS (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  GST_LOG_OBJECT (mq,
      "SingleQueue %d : about to enqueue buffer %p with id %d (pts:%"
      GST_TIME_FORMAT " dts:%" GST_TIME_FORMAT " dur:%" GST_TIME_FORMAT ")",
      sq->id, buffer, curid, GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), GST_TIME_ARGS (duration));

  item = gst_multi_queue_buffer_item_new (GST_MINI_OBJECT_CAST (buffer), curid);

  /* Update interleave before pushing data into queue */
  if (mq->use_interleave) {
    GstClockTime val = timestamp;
    GstClockTimeDiff dval;

    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    /* Untimestamped buffers continue from the current sink position */
    if (val == GST_CLOCK_TIME_NONE)
      val = sq->sink_segment.position;
    if (duration != GST_CLOCK_TIME_NONE)
      val += duration;

    dval = my_segment_to_running_time (&sq->sink_segment, val);
    if (GST_CLOCK_STIME_IS_VALID (dval)) {
      sq->cached_sinktime = dval;
      GST_DEBUG_OBJECT (mq,
          "Queue %d cached sink time now %" G_GINT64_FORMAT " %"
          GST_STIME_FORMAT, sq->id, sq->cached_sinktime,
          GST_STIME_ARGS (sq->cached_sinktime));
      calculate_interleave (mq, sq);
    }
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
  }

  /* This blocks while the queue is full; ownership of item (and buffer)
   * passes to the data queue on success */
  if (!(gst_data_queue_push (sq->queue, (GstDataQueueItem *) item)))
    goto flushing;

  /* update time level, we must do this after pushing the data in the queue so
   * that we never end up filling the queue first. */
  apply_buffer (mq, sq, timestamp, duration, &sq->sink_segment);

done:
  gst_clear_object (&mq);
  return sq->srcresult;

/* ERRORS */
flushing:
  {
    GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
        sq->id, gst_flow_get_name (sq->srcresult));
    if (item)
      gst_multi_queue_item_destroy (item);
    goto done;
  }
was_eos:
  {
    GST_DEBUG_OBJECT (mq, "we are EOS, dropping buffer, return EOS");
    gst_buffer_unref (buffer);
    gst_object_unref (mq);
    return GST_FLOW_EOS;
  }
}
2588
/* Sinkpad activate-mode function. Only push mode is supported. On
 * deactivation the data queue is set flushing, then the multiqueue lock is
 * dropped around taking the pad's stream lock (lock ordering: the streaming
 * thread may hold the stream lock while waiting for the multiqueue lock). */
static gboolean
gst_multi_queue_sink_activate_mode (GstPad * pad, GstObject * parent,
    GstPadMode mode, gboolean active)
{
  gboolean res;
  GstSingleQueue *sq;
  GstMultiQueue *mq;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = (GstMultiQueue *) gst_pad_get_parent (pad);

  /* mq is NULL if the pad is activated/deactivated before being
   * added to the multiqueue */
  if (mq)
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);

  switch (mode) {
    case GST_PAD_MODE_PUSH:
      if (active) {
        /* All pads start off linked until they push one buffer */
        sq->srcresult = GST_FLOW_OK;
        sq->pushed = FALSE;
        gst_data_queue_set_flushing (sq->queue, FALSE);
      } else {
        sq->srcresult = GST_FLOW_FLUSHING;
        /* Release a sinkpad thread possibly blocked waiting for a query
         * answer in gst_multi_queue_sink_query() */
        sq->last_query = FALSE;
        g_cond_signal (&sq->query_handled);
        gst_data_queue_set_flushing (sq->queue, TRUE);

        /* Wait until streaming thread has finished */
        if (mq)
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_LOCK (pad);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        gst_data_queue_flush (sq->queue);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_PAD_STREAM_UNLOCK (pad);
        if (mq)
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      }
      res = TRUE;
      break;
    default:
      /* Pull mode (or anything else) is not supported */
      res = FALSE;
      break;
  }

  if (mq) {
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    gst_object_unref (mq);
  }

  return res;
}
2645
/* Sinkpad event function. Flush events and non-serialized events are pushed
 * through directly; everything else is queued behind the data. SEGMENT and
 * GAP keep an extra ref (sref) so the sink-side segment can be updated after
 * the queue has taken ownership of the event itself. */
static GstFlowReturn
gst_multi_queue_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
  GstSingleQueue *sq;
  GstMultiQueue *mq;
  guint32 curid;
  GstMultiQueueItem *item;
  gboolean res = TRUE;
  GstFlowReturn flowret = GST_FLOW_OK;
  GstEventType type;
  GstEvent *sref = NULL;
  GstPad *srcpad;


  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = (GstMultiQueue *) parent;
  srcpad = g_weak_ref_get (&sq->srcpad);

  if (!srcpad) {
    /* NOTE: the event is not unreffed here; it is dropped with the pad */
    GST_INFO_OBJECT (pad, "Pushing while corresponding sourcepad has been"
        " removed already");

    return GST_FLOW_FLUSHING;
  }

  type = GST_EVENT_TYPE (event);

  switch (type) {
    case GST_EVENT_STREAM_START:
    {
      if (mq->sync_by_running_time) {
        GstStreamFlags stream_flags;
        gst_event_parse_stream_flags (event, &stream_flags);
        if ((stream_flags & GST_STREAM_FLAG_SPARSE)) {
          GST_INFO_OBJECT (mq, "SingleQueue %d is a sparse stream", sq->id);
          sq->is_sparse = TRUE;
        }
      }

      /* Remember which thread feeds this queue */
      sq->thread = g_thread_self ();

      /* Remove EOS flag */
      sq->is_eos = FALSE;
      break;
    }
    case GST_EVENT_FLUSH_START:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush start event",
          sq->id);

      /* Flush events bypass the queue and are pushed immediately */
      res = gst_pad_push_event (srcpad, event);

      gst_single_queue_flush (mq, sq, TRUE, FALSE);
      gst_single_queue_pause (mq, sq);
      goto done;

    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_OBJECT (mq, "SingleQueue %d : received flush stop event",
          sq->id);

      res = gst_pad_push_event (srcpad, event);

      gst_single_queue_flush (mq, sq, FALSE, FALSE);
      gst_single_queue_start (mq, sq);
      goto done;

    case GST_EVENT_SEGMENT:
      sq->is_segment_done = FALSE;
      /* keep a ref to update the sink segment after enqueuing */
      sref = gst_event_ref (event);
      break;
    case GST_EVENT_GAP:
      /* take ref because the queue will take ownership and we need the event
       * afterwards to update the segment */
      sref = gst_event_ref (event);
      if (mq->use_interleave) {
        GstClockTime val, dur;
        GstClockTime stime;
        gst_event_parse_gap (event, &val, &dur);
        if (GST_CLOCK_TIME_IS_VALID (val)) {
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (GST_CLOCK_TIME_IS_VALID (dur))
            val += dur;
          stime = my_segment_to_running_time (&sq->sink_segment, val);
          if (GST_CLOCK_STIME_IS_VALID (stime)) {
            sq->cached_sinktime = stime;
            calculate_interleave (mq, sq);
          }
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        }
      }
      break;

    default:
      /* Non-serialized events bypass the queue */
      if (!(GST_EVENT_IS_SERIALIZED (event))) {
        res = gst_pad_push_event (srcpad, event);
        goto done;
      }
      break;
  }

  /* if eos, we are always full, so avoid hanging incoming indefinitely */
  if (sq->is_eos)
    goto was_eos;

  /* Get an unique incrementing id. */
  curid = g_atomic_int_add ((gint *) & mq->counter, 1);

  item = gst_multi_queue_mo_item_new ((GstMiniObject *) event, curid);

  GST_DEBUG_OBJECT (mq,
      "SingleQueue %d : Enqueuing event %p of type %s with id %d",
      sq->id, event, GST_EVENT_TYPE_NAME (event), curid);

  /* On success the queue owns the event (and the item) */
  if (!gst_data_queue_push (sq->queue, (GstDataQueueItem *) item))
    goto flushing;

  /* mark EOS when we received one, we must do that after putting the
   * buffer in the queue because EOS marks the buffer as filled. */
  switch (type) {
    case GST_EVENT_SEGMENT_DONE:
      sq->is_segment_done = TRUE;
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);
      break;
    case GST_EVENT_EOS:
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      sq->is_eos = TRUE;

      /* Post an error message if we got EOS while downstream
       * has returned an error flow return. After EOS there
       * will be no further buffer which could propagate the
       * error upstream */
      if (sq->srcresult < GST_FLOW_EOS) {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
        GST_ELEMENT_FLOW_ERROR (mq, sq->srcresult);
      } else {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      }

      /* EOS affects the buffering state */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      update_buffering (mq, sq);
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      single_queue_overrun_cb (sq->queue, sq);
      gst_multi_queue_post_buffering (mq);
      break;
    case GST_EVENT_SEGMENT:
      apply_segment (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* a new segment allows us to accept more buffers if we got EOS
       * from downstream */
      GST_MULTI_QUEUE_MUTEX_LOCK (mq);
      if (sq->srcresult == GST_FLOW_EOS)
        sq->srcresult = GST_FLOW_OK;
      GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      break;
    case GST_EVENT_GAP:
      sq->active = TRUE;
      apply_gap (mq, sq, sref, &sq->sink_segment);
      gst_event_unref (sref);
      /* fallthrough */
    default:
      break;
  }

done:

  gst_object_unref (srcpad);
  if (res == FALSE)
    flowret = GST_FLOW_ERROR;
  GST_DEBUG_OBJECT (mq, "SingleQueue %d : returning %s", sq->id,
      gst_flow_get_name (flowret));
  return flowret;

flushing:
  {
    gst_object_unref (srcpad);
    GST_LOG_OBJECT (mq, "SingleQueue %d : exit because task paused, reason: %s",
        sq->id, gst_flow_get_name (sq->srcresult));
    /* push failed: the item (and our sref) must be released here */
    if (sref)
      gst_event_unref (sref);
    gst_multi_queue_item_destroy (item);
    return sq->srcresult;
  }
was_eos:
  {
    gst_object_unref (srcpad);
    GST_DEBUG_OBJECT (mq, "we are EOS, dropping event, return GST_FLOW_EOS");
    gst_event_unref (event);
    return GST_FLOW_EOS;
  }
}
2839
/* Sink pad query handler.
 *
 * Serialized queries are enqueued like buffers/serialized events so that they
 * reach downstream in order; the upstream (sinkpad) thread then blocks on
 * sq->query_handled until the streaming thread has forwarded the query and
 * stored its result, or the queue starts flushing.  Non-serialized queries
 * get default pad handling.
 *
 * Returns TRUE if the query was handled successfully. */
static gboolean
gst_multi_queue_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  gboolean res;
  GstSingleQueue *sq;
  GstMultiQueue *mq;

  sq = GST_MULTIQUEUE_PAD (pad)->sq;
  mq = (GstMultiQueue *) parent;

  switch (GST_QUERY_TYPE (query)) {
    default:
      if (GST_QUERY_IS_SERIALIZED (query)) {
        guint32 curid;
        GstMultiQueueItem *item;

        GST_MULTI_QUEUE_MUTEX_LOCK (mq);
        /* refuse new serialized queries once this queue is not running
         * normally anymore (flushing, EOS, error, ...) */
        if (sq->srcresult != GST_FLOW_OK)
          goto out_flushing;

        /* serialized events go in the queue. We need to be certain that we
         * don't cause deadlocks waiting for the query return value. We check if
         * the queue is empty (nothing is blocking downstream and the query can
         * be pushed for sure) or we are not buffering. If we are buffering,
         * the pipeline waits to unblock downstream until our queue fills up
         * completely, which can not happen if we block on the query..
         * Therefore we only potentially block when we are not buffering. */
        if (!mq->use_buffering || gst_data_queue_is_empty (sq->queue)) {
          /* Get an unique incrementing id. */
          curid = g_atomic_int_add ((gint *) & mq->counter, 1);

          item = gst_multi_queue_mo_item_new ((GstMiniObject *) query, curid);

          GST_DEBUG_OBJECT (mq,
              "SingleQueue %d : Enqueuing query %p of type %s with id %d",
              sq->id, query, GST_QUERY_TYPE_NAME (query), curid);
          /* drop the lock while pushing: the push can block when the data
           * queue is full */
          GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
          res = gst_data_queue_push (sq->queue, (GstDataQueueItem *) item);
          GST_MULTI_QUEUE_MUTEX_LOCK (mq);
          if (!res || sq->flushing)
            goto out_flushing;
          /* it might be that the query has been taken out of the queue
           * while we were unlocked. So, we need to check if the last
           * handled query is the same one than the one we just
           * pushed. If it is, we don't need to wait for the condition
           * variable, otherwise we wait for the condition variable to
           * be signaled. */
          while (!sq->flushing && sq->srcresult == GST_FLOW_OK
              && sq->last_handled_query != query)
            g_cond_wait (&sq->query_handled, &mq->qlock);
          res = sq->last_query;
          sq->last_handled_query = NULL;
        } else {
          GST_DEBUG_OBJECT (mq, "refusing query, we are buffering and the "
              "queue is not empty");
          res = FALSE;
        }
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
      } else {
        /* default handling */
        res = gst_pad_query_default (pad, parent, query);
      }
      break;
  }
  return res;

out_flushing:
  {
    GST_DEBUG_OBJECT (mq, "Flushing");
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    return FALSE;
  }
}
2913
2914 static gboolean
gst_multi_queue_src_activate_mode(GstPad * pad,GstObject * parent,GstPadMode mode,gboolean active)2915 gst_multi_queue_src_activate_mode (GstPad * pad, GstObject * parent,
2916 GstPadMode mode, gboolean active)
2917 {
2918 GstMultiQueue *mq;
2919 GstSingleQueue *sq;
2920 gboolean result;
2921
2922 sq = GST_MULTIQUEUE_PAD (pad)->sq;
2923 mq = g_weak_ref_get (&sq->mqueue);
2924
2925 if (!mq) {
2926 GST_ERROR_OBJECT (pad, "No multique set anymore, can't activate pad");
2927
2928 return FALSE;
2929 }
2930
2931 GST_DEBUG_OBJECT (mq, "SingleQueue %d", sq->id);
2932
2933 switch (mode) {
2934 case GST_PAD_MODE_PUSH:
2935 if (active) {
2936 gst_single_queue_flush (mq, sq, FALSE, TRUE);
2937 result = parent ? gst_single_queue_start (mq, sq) : TRUE;
2938 } else {
2939 gst_single_queue_flush (mq, sq, TRUE, TRUE);
2940 result = gst_single_queue_stop (mq, sq);
2941 }
2942 break;
2943 default:
2944 result = FALSE;
2945 break;
2946 }
2947 gst_object_unref (mq);
2948 return result;
2949 }
2950
2951 static gboolean
gst_multi_queue_src_event(GstPad * pad,GstObject * parent,GstEvent * event)2952 gst_multi_queue_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2953 {
2954 GstSingleQueue *sq = GST_MULTIQUEUE_PAD (pad)->sq;
2955 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
2956 gboolean ret;
2957 GstPad *sinkpad = g_weak_ref_get (&sq->sinkpad);
2958
2959 if (!mq || !sinkpad) {
2960 gst_clear_object (&sinkpad);
2961 gst_clear_object (&mq);
2962 GST_INFO_OBJECT (pad, "No multique/sinkpad set anymore, flushing");
2963
2964 return FALSE;
2965 }
2966
2967 switch (GST_EVENT_TYPE (event)) {
2968 case GST_EVENT_RECONFIGURE:
2969 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
2970 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
2971 sq->srcresult = GST_FLOW_OK;
2972 g_cond_signal (&sq->turn);
2973 }
2974 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
2975
2976 ret = gst_pad_push_event (sinkpad, event);
2977 break;
2978 default:
2979 ret = gst_pad_push_event (sinkpad, event);
2980 break;
2981 }
2982
2983 gst_object_unref (sinkpad);
2984 gst_object_unref (mq);
2985
2986 return ret;
2987 }
2988
2989 static gboolean
gst_multi_queue_src_query(GstPad * pad,GstObject * parent,GstQuery * query)2990 gst_multi_queue_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2991 {
2992 gboolean res;
2993
2994 /* FIXME, Handle position offset depending on queue size */
2995 switch (GST_QUERY_TYPE (query)) {
2996 default:
2997 /* default handling */
2998 res = gst_pad_query_default (pad, parent, query);
2999 break;
3000 }
3001 return res;
3002 }
3003
3004 /*
3005 * Next-non-linked functions
3006 */
3007
3008 /* WITH LOCK TAKEN */
3009 static void
wake_up_next_non_linked(GstMultiQueue * mq)3010 wake_up_next_non_linked (GstMultiQueue * mq)
3011 {
3012 GList *tmp;
3013
3014 /* maybe no-one is waiting */
3015 if (mq->numwaiting < 1)
3016 return;
3017
3018 if (mq->sync_by_running_time && GST_CLOCK_STIME_IS_VALID (mq->high_time)) {
3019 /* Else figure out which singlequeue(s) need waking up */
3020 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3021 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3022 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3023 GstClockTimeDiff high_time;
3024
3025 if (GST_CLOCK_STIME_IS_VALID (sq->group_high_time))
3026 high_time = sq->group_high_time;
3027 else
3028 high_time = mq->high_time;
3029
3030 if (GST_CLOCK_STIME_IS_VALID (sq->next_time) &&
3031 GST_CLOCK_STIME_IS_VALID (high_time)
3032 && sq->next_time <= high_time) {
3033 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
3034 g_cond_signal (&sq->turn);
3035 }
3036 }
3037 }
3038 } else {
3039 /* Else figure out which singlequeue(s) need waking up */
3040 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3041 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3042 if (sq->srcresult == GST_FLOW_NOT_LINKED &&
3043 sq->nextid != 0 && sq->nextid <= mq->highid) {
3044 GST_LOG_OBJECT (mq, "Waking up singlequeue %d", sq->id);
3045 g_cond_signal (&sq->turn);
3046 }
3047 }
3048 }
3049 }
3050
3051 /* WITH LOCK TAKEN */
3052 static void
compute_high_id(GstMultiQueue * mq)3053 compute_high_id (GstMultiQueue * mq)
3054 {
3055 /* The high-id is either the highest id among the linked pads, or if all
3056 * pads are not-linked, it's the lowest not-linked pad */
3057 GList *tmp;
3058 guint32 lowest = G_MAXUINT32;
3059 guint32 highid = G_MAXUINT32;
3060
3061 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3062 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3063 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3064
3065 if (!srcpad) {
3066 GST_INFO_OBJECT (mq,
3067 "srcpad has been removed already... ignoring single queue");
3068
3069 continue;
3070 }
3071
3072 GST_LOG_OBJECT (mq, "inspecting sq:%d , nextid:%d, oldid:%d, srcresult:%s",
3073 sq->id, sq->nextid, sq->oldid, gst_flow_get_name (sq->srcresult));
3074
3075 /* No need to consider queues which are not waiting */
3076 if (sq->nextid == 0) {
3077 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3078 gst_object_unref (srcpad);
3079 continue;
3080 }
3081
3082 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3083 if (sq->nextid < lowest)
3084 lowest = sq->nextid;
3085 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3086 /* If we don't have a global highid, or the global highid is lower than
3087 * this single queue's last outputted id, store the queue's one,
3088 * unless the singlequeue output is at EOS */
3089 if ((highid == G_MAXUINT32) || (sq->oldid > highid))
3090 highid = sq->oldid;
3091 }
3092 gst_object_unref (srcpad);
3093 }
3094
3095 if (highid == G_MAXUINT32 || lowest < highid)
3096 mq->highid = lowest;
3097 else
3098 mq->highid = highid;
3099
3100 GST_LOG_OBJECT (mq, "Highid is now : %u, lowest non-linked %u", mq->highid,
3101 lowest);
3102 }
3103
3104 /* WITH LOCK TAKEN */
3105 static void
compute_high_time(GstMultiQueue * mq,guint groupid)3106 compute_high_time (GstMultiQueue * mq, guint groupid)
3107 {
3108 /* The high-time is either the highest last time among the linked
3109 * pads, or if all pads are not-linked, it's the lowest nex time of
3110 * not-linked pad */
3111 GList *tmp;
3112 GstClockTimeDiff highest = GST_CLOCK_STIME_NONE;
3113 GstClockTimeDiff lowest = GST_CLOCK_STIME_NONE;
3114 GstClockTimeDiff group_high = GST_CLOCK_STIME_NONE;
3115 GstClockTimeDiff group_low = GST_CLOCK_STIME_NONE;
3116 GstClockTimeDiff res;
3117 /* Number of streams which belong to groupid */
3118 guint group_count = 0;
3119
3120 if (!mq->sync_by_running_time)
3121 /* return GST_CLOCK_STIME_NONE; */
3122 return;
3123
3124 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3125 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3126 GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
3127
3128 if (!srcpad) {
3129 GST_INFO_OBJECT (mq,
3130 "srcpad has been removed already... ignoring single queue");
3131
3132 continue;
3133 }
3134
3135 GST_LOG_OBJECT (mq,
3136 "inspecting sq:%d (group:%d) , next_time:%" GST_STIME_FORMAT
3137 ", last_time:%" GST_STIME_FORMAT ", srcresult:%s", sq->id, sq->groupid,
3138 GST_STIME_ARGS (sq->next_time), GST_STIME_ARGS (sq->last_time),
3139 gst_flow_get_name (sq->srcresult));
3140
3141 if (sq->groupid == groupid)
3142 group_count++;
3143
3144 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3145 /* No need to consider queues which are not waiting */
3146 if (!GST_CLOCK_STIME_IS_VALID (sq->next_time)) {
3147 GST_LOG_OBJECT (mq, "sq:%d is not waiting - ignoring", sq->id);
3148 gst_object_unref (srcpad);
3149 continue;
3150 }
3151
3152 if (lowest == GST_CLOCK_STIME_NONE || sq->next_time < lowest)
3153 lowest = sq->next_time;
3154 if (sq->groupid == groupid && (group_low == GST_CLOCK_STIME_NONE
3155 || sq->next_time < group_low))
3156 group_low = sq->next_time;
3157 } else if (!GST_PAD_IS_EOS (srcpad) && sq->srcresult != GST_FLOW_EOS) {
3158 /* If we don't have a global high time, or the global high time
3159 * is lower than this single queue's last outputted time, store
3160 * the queue's one, unless the singlequeue output is at EOS. */
3161 if (highest == GST_CLOCK_STIME_NONE
3162 || (sq->last_time != GST_CLOCK_STIME_NONE && sq->last_time > highest))
3163 highest = sq->last_time;
3164 if (sq->groupid == groupid && (group_high == GST_CLOCK_STIME_NONE
3165 || (sq->last_time != GST_CLOCK_STIME_NONE
3166 && sq->last_time > group_high)))
3167 group_high = sq->last_time;
3168 }
3169 GST_LOG_OBJECT (mq,
3170 "highest now %" GST_STIME_FORMAT " lowest %" GST_STIME_FORMAT,
3171 GST_STIME_ARGS (highest), GST_STIME_ARGS (lowest));
3172 if (sq->groupid == groupid)
3173 GST_LOG_OBJECT (mq,
3174 "grouphigh %" GST_STIME_FORMAT " grouplow %" GST_STIME_FORMAT,
3175 GST_STIME_ARGS (group_high), GST_STIME_ARGS (group_low));
3176
3177 gst_object_unref (srcpad);
3178 }
3179
3180 if (highest == GST_CLOCK_STIME_NONE)
3181 mq->high_time = lowest;
3182 else
3183 mq->high_time = highest;
3184
3185 /* If there's only one stream of a given type, use the global high */
3186 if (group_count < 2)
3187 res = GST_CLOCK_STIME_NONE;
3188 else if (group_high == GST_CLOCK_STIME_NONE)
3189 res = group_low;
3190 else
3191 res = group_high;
3192
3193 GST_LOG_OBJECT (mq, "group count %d for groupid %u", group_count, groupid);
3194 GST_LOG_OBJECT (mq,
3195 "MQ High time is now : %" GST_STIME_FORMAT ", group %d high time %"
3196 GST_STIME_FORMAT ", lowest non-linked %" GST_STIME_FORMAT,
3197 GST_STIME_ARGS (mq->high_time), groupid, GST_STIME_ARGS (mq->high_time),
3198 GST_STIME_ARGS (lowest));
3199
3200 for (tmp = mq->queues; tmp; tmp = tmp->next) {
3201 GstSingleQueue *sq = (GstSingleQueue *) tmp->data;
3202 if (groupid == sq->groupid)
3203 sq->group_high_time = res;
3204 }
3205 }
3206
/* TRUE when the given limit dimension (visible/bytes/time) of queue @q is
 * enabled (non-zero) and @value has reached it */
#define IS_FILLED(q, format, value) (((q)->max_size.format) != 0 && \
    ((q)->max_size.format) <= (value))
3209
3210 /*
3211 * GstSingleQueue functions
3212 */
3213 static void
single_queue_overrun_cb(GstDataQueue * dq,GstSingleQueue * sq)3214 single_queue_overrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3215 {
3216 GList *tmp;
3217 GstDataQueueSize size;
3218 gboolean filled = TRUE;
3219 gboolean empty_found = FALSE;
3220 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3221
3222 if (!mq) {
3223 GST_ERROR ("No multique set anymore, not doing anything");
3224
3225 return;
3226 }
3227
3228 gst_data_queue_get_level (sq->queue, &size);
3229
3230 GST_LOG_OBJECT (mq,
3231 "Single Queue %d: EOS %d, visible %u/%u, bytes %u/%u, time %"
3232 G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, sq->id, sq->is_eos, size.visible,
3233 sq->max_size.visible, size.bytes, sq->max_size.bytes, sq->cur_time,
3234 sq->max_size.time);
3235
3236 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3237
3238 /* check if we reached the hard time/bytes limits;
3239 time limit is only taken into account for non-sparse streams */
3240 if (sq->is_eos || IS_FILLED (sq, bytes, size.bytes) ||
3241 (!sq->is_sparse && IS_FILLED (sq, time, sq->cur_time))) {
3242 goto done;
3243 }
3244
3245 /* Search for empty queues */
3246 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3247 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3248
3249 if (oq == sq)
3250 continue;
3251
3252 if (oq->srcresult == GST_FLOW_NOT_LINKED) {
3253 GST_LOG_OBJECT (mq, "Queue %d is not-linked", oq->id);
3254 continue;
3255 }
3256
3257 GST_LOG_OBJECT (mq, "Checking Queue %d", oq->id);
3258 if (gst_data_queue_is_empty (oq->queue) && !oq->is_sparse) {
3259 GST_LOG_OBJECT (mq, "Queue %d is empty", oq->id);
3260 empty_found = TRUE;
3261 break;
3262 }
3263 }
3264
3265 /* if hard limits are not reached then we allow one more buffer in the full
3266 * queue, but only if any of the other singelqueues are empty */
3267 if (empty_found) {
3268 if (IS_FILLED (sq, visible, size.visible)) {
3269 sq->max_size.visible = size.visible + 1;
3270 GST_DEBUG_OBJECT (mq,
3271 "Bumping single queue %d max visible to %d",
3272 sq->id, sq->max_size.visible);
3273 filled = FALSE;
3274 }
3275 }
3276
3277 done:
3278 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3279 gst_object_unref (mq);
3280
3281 /* Overrun is always forwarded, since this is blocking the upstream element */
3282 if (filled) {
3283 GST_DEBUG_OBJECT (mq, "Queue %d is filled, signalling overrun", sq->id);
3284 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_OVERRUN], 0);
3285 }
3286 }
3287
3288 static void
single_queue_underrun_cb(GstDataQueue * dq,GstSingleQueue * sq)3289 single_queue_underrun_cb (GstDataQueue * dq, GstSingleQueue * sq)
3290 {
3291 gboolean empty = TRUE;
3292 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3293 GList *tmp;
3294
3295 if (!mq) {
3296 GST_ERROR ("No multique set anymore, not doing anything");
3297
3298 return;
3299 }
3300
3301 if (sq->srcresult == GST_FLOW_NOT_LINKED) {
3302 GST_LOG_OBJECT (mq, "Single Queue %d is empty but not-linked", sq->id);
3303 gst_object_unref (mq);
3304 return;
3305 } else {
3306 GST_LOG_OBJECT (mq,
3307 "Single Queue %d is empty, Checking other single queues", sq->id);
3308 }
3309
3310 GST_MULTI_QUEUE_MUTEX_LOCK (mq);
3311 for (tmp = mq->queues; tmp; tmp = g_list_next (tmp)) {
3312 GstSingleQueue *oq = (GstSingleQueue *) tmp->data;
3313
3314 if (gst_data_queue_is_full (oq->queue)) {
3315 GstDataQueueSize size;
3316
3317 gst_data_queue_get_level (oq->queue, &size);
3318 if (IS_FILLED (oq, visible, size.visible)) {
3319 oq->max_size.visible = size.visible + 1;
3320 GST_DEBUG_OBJECT (mq,
3321 "queue %d is filled, bumping its max visible to %d", oq->id,
3322 oq->max_size.visible);
3323 gst_data_queue_limits_changed (oq->queue);
3324 }
3325 }
3326 if (!gst_data_queue_is_empty (oq->queue) || oq->is_sparse)
3327 empty = FALSE;
3328 }
3329 GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
3330 gst_object_unref (mq);
3331
3332 if (empty) {
3333 GST_DEBUG_OBJECT (mq, "All queues are empty, signalling it");
3334 g_signal_emit (mq, gst_multi_queue_signals[SIGNAL_UNDERRUN], 0);
3335 }
3336 }
3337
3338 static gboolean
single_queue_check_full(GstDataQueue * dataq,guint visible,guint bytes,guint64 time,GstSingleQueue * sq)3339 single_queue_check_full (GstDataQueue * dataq, guint visible, guint bytes,
3340 guint64 time, GstSingleQueue * sq)
3341 {
3342 gboolean res;
3343 GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);
3344
3345 if (!mq) {
3346 GST_ERROR ("No multique set anymore, let's say we are full");
3347
3348 return TRUE;
3349 }
3350
3351 GST_DEBUG_OBJECT (mq,
3352 "queue %d: visible %u/%u, bytes %u/%u, time %" G_GUINT64_FORMAT "/%"
3353 G_GUINT64_FORMAT, sq->id, visible, sq->max_size.visible, bytes,
3354 sq->max_size.bytes, sq->cur_time, sq->max_size.time);
3355
3356 /* we are always filled on EOS */
3357 if (sq->is_eos || sq->is_segment_done) {
3358 res = TRUE;
3359 goto done;
3360 }
3361
3362 /* we never go past the max visible items unless we are in buffering mode */
3363 if (!mq->use_buffering && IS_FILLED (sq, visible, visible)) {
3364 res = TRUE;
3365 goto done;
3366 }
3367
3368 /* check time or bytes */
3369 res = IS_FILLED (sq, bytes, bytes);
3370 /* We only care about limits in time if we're not a sparse stream or
3371 * we're not syncing by running time */
3372 if (!sq->is_sparse || !mq->sync_by_running_time) {
3373 /* If unlinked, take into account the extra unlinked cache time */
3374 if (mq->sync_by_running_time && sq->srcresult == GST_FLOW_NOT_LINKED) {
3375 if (sq->cur_time > mq->unlinked_cache_time)
3376 res |= IS_FILLED (sq, time, sq->cur_time - mq->unlinked_cache_time);
3377 else
3378 res = FALSE;
3379 } else
3380 res |= IS_FILLED (sq, time, sq->cur_time);
3381 }
3382 done:
3383 gst_object_unref (mq);
3384
3385 return res;
3386 }
3387
/* Drain every item from @sq's data queue.
 *
 * When @full is FALSE (partial flush), sticky events other than SEGMENT and
 * EOS are stored back on the srcpad before their items are destroyed, so a
 * relinked downstream element still receives them.  Buffering state is
 * recomputed and posted afterwards. */
static void
gst_single_queue_flush_queue (GstSingleQueue * sq, gboolean full)
{
  GstDataQueueItem *sitem;
  GstMultiQueueItem *mitem;
  gboolean was_flushing = FALSE;
  GstPad *srcpad = g_weak_ref_get (&sq->srcpad);
  GstMultiQueue *mq = g_weak_ref_get (&sq->mqueue);

  while (!gst_data_queue_is_empty (sq->queue)) {
    GstMiniObject *data;

    /* FIXME: If this fails here although the queue is not empty,
     * we're flushing... but we want to rescue all sticky
     * events nonetheless.
     */
    if (!gst_data_queue_pop (sq->queue, &sitem)) {
      /* pop failed because the data queue is flushing: temporarily clear
       * the flushing flag so the remaining items can still be drained */
      was_flushing = TRUE;
      gst_data_queue_set_flushing (sq->queue, FALSE);
      continue;
    }

    mitem = (GstMultiQueueItem *) sitem;

    data = sitem->object;

    if (!full && !mitem->is_query && GST_IS_EVENT (data)
        && srcpad && GST_EVENT_IS_STICKY (data)
        && GST_EVENT_TYPE (data) != GST_EVENT_SEGMENT
        && GST_EVENT_TYPE (data) != GST_EVENT_EOS) {
      /* keep the sticky event on the srcpad so it is re-sent later */
      gst_pad_store_sticky_event (srcpad, GST_EVENT_CAST (data));
    }

    sitem->destroy (sitem);
  }
  gst_clear_object (&srcpad);

  gst_data_queue_flush (sq->queue);
  /* restore the flushing state we cleared above to keep draining */
  if (was_flushing)
    gst_data_queue_set_flushing (sq->queue, TRUE);

  /* the multiqueue may already be gone (pads released during shutdown) */
  if (mq) {
    GST_MULTI_QUEUE_MUTEX_LOCK (mq);
    update_buffering (mq, sq);
    GST_MULTI_QUEUE_MUTEX_UNLOCK (mq);
    gst_multi_queue_post_buffering (mq);
    gst_object_unref (mq);
  }
}
3437
3438 static void
gst_single_queue_unref(GstSingleQueue * sq)3439 gst_single_queue_unref (GstSingleQueue * sq)
3440 {
3441 if (g_atomic_int_dec_and_test (&sq->refcount)) {
3442 /* DRAIN QUEUE */
3443 gst_data_queue_flush (sq->queue);
3444 g_object_unref (sq->queue);
3445 g_cond_clear (&sq->turn);
3446 g_cond_clear (&sq->query_handled);
3447 g_weak_ref_clear (&sq->sinkpad);
3448 g_weak_ref_clear (&sq->srcpad);
3449 g_weak_ref_clear (&sq->mqueue);
3450 g_free (sq);
3451 }
3452 }
3453
3454
3455 static GstSingleQueue *
gst_single_queue_ref(GstSingleQueue * squeue)3456 gst_single_queue_ref (GstSingleQueue * squeue)
3457 {
3458 g_atomic_int_inc (&squeue->refcount);
3459
3460 return squeue;
3461 }
3462
/* Create a new single queue with the given @id (or the first free id when
 * the caller passes (guint) -1), together with its sink and source request
 * pads, and add it to @mqueue.
 *
 * Returns NULL when an explicitly requested id is already taken.  The new
 * queue inherits the multiqueue's current size limits; the pads are added
 * to the element and, when the element is not in NULL state, activated and
 * the streaming task started.  Called without the multiqueue lock held. */
static GstSingleQueue *
gst_single_queue_new (GstMultiQueue * mqueue, guint id)
{
  GstPad *srcpad, *sinkpad;
  GstSingleQueue *sq;
  GstPadTemplate *templ;
  gchar *name;
  GList *tmp;
  /* NOTE(review): id is unsigned, so (id == -1) compares against G_MAXUINT;
   * -1 is the "pick the next free id" sentinel */
  guint temp_id = (id == -1) ? 0 : id;

  GST_MULTI_QUEUE_MUTEX_LOCK (mqueue);

  /* Find an unused queue ID, if possible the passed one */
  for (tmp = mqueue->queues; tmp; tmp = g_list_next (tmp)) {
    GstSingleQueue *sq2 = (GstSingleQueue *) tmp->data;
    /* This works because the IDs are sorted in ascending order */
    if (sq2->id == temp_id) {
      /* If this ID was requested by the caller return NULL,
       * otherwise just get us the next one */
      if (id == -1) {
        temp_id = sq2->id + 1;
      } else {
        GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);
        return NULL;
      }
    } else if (sq2->id > temp_id) {
      break;
    }
  }

  sq = g_new0 (GstSingleQueue, 1);
  g_atomic_int_set (&sq->refcount, 1);

  mqueue->nbqueues++;
  sq->id = temp_id;
  sq->groupid = DEFAULT_PAD_GROUP_ID;
  sq->group_high_time = GST_CLOCK_STIME_NONE;

  /* tmp points at the first queue with a larger id (or NULL), so inserting
   * before it keeps the list sorted by id */
  mqueue->queues = g_list_insert_before (mqueue->queues, tmp, sq);
  mqueue->queues_cookie++;

  /* copy over max_size and extra_size so we don't need to take the lock
   * any longer when checking if the queue is full. */
  sq->max_size.visible = mqueue->max_size.visible;
  sq->max_size.bytes = mqueue->max_size.bytes;
  sq->max_size.time = mqueue->max_size.time;

  sq->extra_size.visible = mqueue->extra_size.visible;
  sq->extra_size.bytes = mqueue->extra_size.bytes;
  sq->extra_size.time = mqueue->extra_size.time;

  GST_DEBUG_OBJECT (mqueue, "Creating GstSingleQueue id:%d", sq->id);

  g_weak_ref_init (&sq->mqueue, mqueue);
  sq->srcresult = GST_FLOW_FLUSHING;
  sq->pushed = FALSE;
  sq->queue = gst_data_queue_new ((GstDataQueueCheckFullFunction)
      single_queue_check_full,
      (GstDataQueueFullCallback) single_queue_overrun_cb,
      (GstDataQueueEmptyCallback) single_queue_underrun_cb, sq);
  sq->is_eos = FALSE;
  sq->is_sparse = FALSE;
  sq->flushing = FALSE;
  sq->active = FALSE;
  gst_segment_init (&sq->sink_segment, GST_FORMAT_TIME);
  gst_segment_init (&sq->src_segment, GST_FORMAT_TIME);

  sq->nextid = 0;
  sq->oldid = 0;
  sq->next_time = GST_CLOCK_STIME_NONE;
  sq->last_time = GST_CLOCK_STIME_NONE;
  g_cond_init (&sq->turn);
  g_cond_init (&sq->query_handled);

  sq->sinktime = GST_CLOCK_STIME_NONE;
  sq->srctime = GST_CLOCK_STIME_NONE;
  sq->sink_tainted = TRUE;
  sq->src_tainted = TRUE;

  /* create and configure the sink pad */
  name = g_strdup_printf ("sink_%u", sq->id);
  templ = gst_static_pad_template_get (&sinktemplate);
  sinkpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
      "direction", templ->direction, "template", templ, NULL);
  g_weak_ref_init (&sq->sinkpad, sinkpad);
  gst_object_unref (templ);
  g_free (name);

  GST_MULTIQUEUE_PAD (sinkpad)->sq = sq;

  gst_pad_set_chain_function (sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_chain));
  gst_pad_set_activatemode_function (sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_activate_mode));
  gst_pad_set_event_full_function (sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_event));
  gst_pad_set_query_function (sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_sink_query));
  gst_pad_set_iterate_internal_links_function (sinkpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
  GST_OBJECT_FLAG_SET (sinkpad, GST_PAD_FLAG_PROXY_CAPS);

  /* create and configure the matching source pad */
  name = g_strdup_printf ("src_%u", sq->id);
  templ = gst_static_pad_template_get (&srctemplate);
  srcpad = g_object_new (GST_TYPE_MULTIQUEUE_PAD, "name", name,
      "direction", templ->direction, "template", templ, NULL);
  g_weak_ref_init (&sq->srcpad, srcpad);
  gst_object_unref (templ);
  g_free (name);

  /* the srcpad holds its own reference on the single queue */
  GST_MULTIQUEUE_PAD (srcpad)->sq = gst_single_queue_ref (sq);

  gst_pad_set_activatemode_function (srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_activate_mode));
  gst_pad_set_event_function (srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_event));
  gst_pad_set_query_function (srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_src_query));
  gst_pad_set_iterate_internal_links_function (srcpad,
      GST_DEBUG_FUNCPTR (gst_multi_queue_iterate_internal_links));
  GST_OBJECT_FLAG_SET (srcpad, GST_PAD_FLAG_PROXY_CAPS);

  GST_MULTI_QUEUE_MUTEX_UNLOCK (mqueue);

  /* only activate the pads when we are not in the NULL state
   * and add the pad under the state_lock to prevent state changes
   * between activating and adding */
  g_rec_mutex_lock (GST_STATE_GET_LOCK (mqueue));
  if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
    gst_pad_set_active (srcpad, TRUE);
    gst_pad_set_active (sinkpad, TRUE);
  }
  gst_element_add_pad (GST_ELEMENT (mqueue), srcpad);
  gst_element_add_pad (GST_ELEMENT (mqueue), sinkpad);
  if (GST_STATE_TARGET (mqueue) != GST_STATE_NULL) {
    gst_single_queue_start (mqueue, sq);
  }
  g_rec_mutex_unlock (GST_STATE_GET_LOCK (mqueue));

  GST_DEBUG_OBJECT (mqueue, "GstSingleQueue [%d] created and pads added",
      sq->id);

  return sq;
}
3606