/* GStreamer unit test for videoframe-audiolevel
 *
 * Copyright (C) 2015 Vivia Nikolaidou <vivia@toolsonair.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

/* suppress warnings for deprecated API such as GValueArray
 * with newer GLib versions (>= 2.31.0) */
#define GLIB_DISABLE_DEPRECATION_WARNINGS

#include <string.h>

#include <gst/check/gstcheck.h>
#include <gst/audio/audio.h>

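/* Test state shared between the streaming threads, the output pad callbacks
 * and the bus sync handler.  The parameters further below are reset by
 * set_default_params() and tweaked by the individual test cases. */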
static gboolean got_eos;
static guint audio_buffer_count, video_buffer_count;
static GstSegment current_audio_segment, current_video_segment;
static guint num_msgs;
static GQueue v_timestamp_q, msg_timestamp_q;

static guint n_abuffers, n_vbuffers;
static guint channels, fill_value;
static gdouble expected_rms;
static gboolean audiodelay, videodelay, per_channel, long_video;
static gboolean early_video, late_video;
static gboolean video_gaps, video_overlaps;
static gboolean audio_nondiscont, audio_drift;

static guint fill_value_per_channel[] = { 0, 1 };
static gdouble expected_rms_per_channel[] = { 0, 0.0078125 };

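/* Reset all test parameters to their default values; each test case
 * overrides the fields it cares about before running the generic test. */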
static void
set_default_params (void)
{
  n_abuffers = 40;
  n_vbuffers = 15;
  channels = 2;
  expected_rms = 0.0078125;
  fill_value = 1;
  audiodelay = FALSE;
  videodelay = FALSE;
  per_channel = FALSE;
  long_video = FALSE;
  video_gaps = FALSE;
  video_overlaps = FALSE;
  audio_nondiscont = FALSE;
  audio_drift = FALSE;
  early_video = FALSE;
  late_video = FALSE;
}

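/* Chain function for the audio output pad: verifies the buffer, stream and
 * running-time timestamps (unless the test introduces audio jitter) as well
 * as the sample values, then counts the buffer. */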
static GstFlowReturn
output_achain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstClockTime timestamp;
  guint8 b;
  gboolean audio_jitter = audio_nondiscont || audio_drift || early_video;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp,
        (audio_buffer_count % n_abuffers) * 1 * GST_SECOND);
  timestamp =
      gst_segment_to_stream_time (&current_audio_segment, GST_FORMAT_TIME,
      timestamp);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp,
        (audio_buffer_count % n_abuffers) * 1 * GST_SECOND);

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_running_time (&current_audio_segment, GST_FORMAT_TIME,
      timestamp);
  if (!audio_jitter)
    fail_unless_equals_int64 (timestamp, audio_buffer_count * 1 * GST_SECOND);

  gst_buffer_extract (buffer, 0, &b, 1);

  if (per_channel) {
    fail_unless_equals_int (b, fill_value_per_channel[0]);
  } else {
    fail_unless_equals_int (b, fill_value);
  }

  audio_buffer_count++;
  gst_buffer_unref (buffer);
  return GST_FLOW_OK;
}

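/* Event function for the audio output pad: tracks the current audio segment
 * and remembers whether EOS was received. */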
static gboolean
output_aevent (GstPad * pad, GstObject * parent, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&current_audio_segment, GST_FORMAT_UNDEFINED);
      break;
    case GST_EVENT_SEGMENT:
      gst_event_copy_segment (event, &current_audio_segment);
      break;
    case GST_EVENT_EOS:
      got_eos = TRUE;
      break;
    default:
      break;
  }

  gst_event_unref (event);
  return TRUE;
}

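/* Chain function for the video output pad: verifies the buffer timestamps
 * and the frame counter stored in the first byte, unless the test
 * introduces gaps, overlaps or a late video start. */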
static GstFlowReturn
output_vchain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstClockTime timestamp;
  guint8 b;
  gboolean jitter = video_gaps || video_overlaps || late_video;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  if (!jitter)
    fail_unless_equals_int64 (timestamp,
        (video_buffer_count % n_vbuffers) * 25 * GST_MSECOND);
  timestamp =
      gst_segment_to_stream_time (&current_video_segment, GST_FORMAT_TIME,
      timestamp);
  if (!jitter)
    fail_unless_equals_int64 (timestamp,
        (video_buffer_count % n_vbuffers) * 25 * GST_MSECOND);

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_running_time (&current_video_segment, GST_FORMAT_TIME,
      timestamp);
  if (!jitter)
    fail_unless_equals_int64 (timestamp, video_buffer_count * 25 * GST_MSECOND);

  gst_buffer_extract (buffer, 0, &b, 1);
  if (!jitter)
    fail_unless_equals_int (b, video_buffer_count % n_vbuffers);

  video_buffer_count++;
  gst_buffer_unref (buffer);
  return GST_FLOW_OK;
}

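/* Event function for the video output pad: tracks the current video segment
 * and remembers whether EOS was received. */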
static gboolean
output_vevent (GstPad * pad, GstObject * parent, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&current_video_segment, GST_FORMAT_UNDEFINED);
      break;
    case GST_EVENT_SEGMENT:
      gst_event_copy_segment (event, &current_video_segment);
      break;
    case GST_EVENT_EOS:
      got_eos = TRUE;
      break;
    default:
      break;
  }

  gst_event_unref (event);
  return TRUE;
}

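/* Audio streaming thread: pushes n_abuffers one-second S8 buffers filled
 * with fill_value (or with per-channel values), optionally delayed,
 * drifting or non-contiguous depending on the test parameters. */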
static gpointer
push_abuffers (gpointer data)
{
  GstSegment segment;
  GstPad *pad = data;
  gint i, j, k;
  GstClockTime timestamp = 0;
  GstAudioInfo info;
  GstCaps *caps;
  guint buf_size = 1000;

  if (audiodelay)
    g_usleep (2000);

  if (early_video)
    timestamp = 50 * GST_MSECOND;

  gst_pad_send_event (pad, gst_event_new_stream_start ("test"));

  gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S8, buf_size, channels,
      NULL);
  caps = gst_audio_info_to_caps (&info);
  gst_pad_send_event (pad, gst_event_new_caps (caps));
  gst_caps_unref (caps);

  gst_segment_init (&segment, GST_FORMAT_TIME);
  gst_pad_send_event (pad, gst_event_new_segment (&segment));

  for (i = 0; i < n_abuffers; i++) {
    GstBuffer *buf = gst_buffer_new_and_alloc (channels * buf_size);

    if (per_channel) {
      GstMapInfo map;
      guint8 *in_data;

      gst_buffer_map (buf, &map, GST_MAP_WRITE);
      in_data = map.data;

      for (j = 0; j < buf_size; j++) {
        for (k = 0; k < channels; k++) {
          in_data[j * channels + k] = fill_value_per_channel[k];
        }
      }

      gst_buffer_unmap (buf, &map);
    } else {
      gst_buffer_memset (buf, 0, fill_value, channels * buf_size);
    }

    GST_BUFFER_TIMESTAMP (buf) = timestamp;
    timestamp += 1 * GST_SECOND;
    if (audio_drift)
      timestamp += 50 * GST_MSECOND;
    else if (i == 4 && audio_nondiscont)
      timestamp += 30 * GST_MSECOND;
    GST_BUFFER_DURATION (buf) = timestamp - GST_BUFFER_TIMESTAMP (buf);

    fail_unless (gst_pad_chain (pad, buf) == GST_FLOW_OK);
  }
  gst_pad_send_event (pad, gst_event_new_eos ());

  return NULL;
}

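/* Video streaming thread: pushes n_vbuffers 25 ms buffers whose first byte
 * encodes the frame number, records the running time at the end of each
 * frame for later comparison against the element's messages, and optionally
 * introduces a gap or overlap after the fifth buffer. */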
static gpointer
push_vbuffers (gpointer data)
{
  GstSegment segment;
  GstPad *pad = data;
  gint i;
  GstClockTime timestamp = 0;

  if (videodelay)
    g_usleep (2000);

  if (late_video)
    timestamp = 50 * GST_MSECOND;

  gst_pad_send_event (pad, gst_event_new_stream_start ("test"));
  gst_segment_init (&segment, GST_FORMAT_TIME);
  gst_pad_send_event (pad, gst_event_new_segment (&segment));

  for (i = 0; i < n_vbuffers; i++) {
    GstBuffer *buf = gst_buffer_new_and_alloc (1000);
    GstClockTime *rtime = g_new (GstClockTime, 1);

    gst_buffer_memset (buf, 0, i, 1);

    GST_BUFFER_TIMESTAMP (buf) = timestamp;
    timestamp += 25 * GST_MSECOND;
    GST_BUFFER_DURATION (buf) = timestamp - GST_BUFFER_TIMESTAMP (buf);
    *rtime = gst_segment_to_running_time (&segment, GST_FORMAT_TIME, timestamp);
    g_queue_push_tail (&v_timestamp_q, rtime);

    if (i == 4) {
      if (video_gaps)
        timestamp += 10 * GST_MSECOND;
      else if (video_overlaps)
        timestamp -= 10 * GST_MSECOND;
    }

    fail_unless (gst_pad_chain (pad, buf) == GST_FLOW_OK);
  }
  gst_pad_send_event (pad, gst_event_new_eos ());

  return NULL;
}

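/* Bus sync handler: for every "videoframe-audiolevel" element message,
 * stores the reported running-time and checks the per-channel RMS values
 * against the expected ones. */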
static GstBusSyncReply
on_message (GstBus * bus, GstMessage * message, gpointer user_data)
{
  const GstStructure *s = gst_message_get_structure (message);
  const gchar *name = gst_structure_get_name (s);
  GValueArray *rms_arr;
  const GValue *array_val;
  const GValue *value;
  gdouble rms;
  gint channels2;
  guint i;
  GstClockTime *rtime;

  if (message->type != GST_MESSAGE_ELEMENT
      || strcmp (name, "videoframe-audiolevel") != 0)
    goto done;

  num_msgs++;
  rtime = g_new (GstClockTime, 1);
  if (!gst_structure_get_clock_time (s, "running-time", rtime)) {
    g_warning ("Could not parse running time");
    g_free (rtime);
  } else {
    g_queue_push_tail (&msg_timestamp_q, rtime);
  }

  /* the values are packed into GValueArrays with one value per channel */
  array_val = gst_structure_get_value (s, "rms");
  rms_arr = (GValueArray *) g_value_get_boxed (array_val);
  channels2 = rms_arr->n_values;
  fail_unless_equals_int (channels2, channels);

  for (i = 0; i < channels; ++i) {
    value = g_value_array_get_nth (rms_arr, i);
    rms = g_value_get_double (value);
    if (per_channel) {
      fail_unless_equals_float (rms, expected_rms_per_channel[i]);
    } else if (early_video && *rtime <= 50 * GST_MSECOND) {
      fail_unless_equals_float (rms, 0);
    } else {
      fail_unless_equals_float (rms, expected_rms);
    }
  }

done:
  return GST_BUS_PASS;
}

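/* Generic test body: builds a standalone videoframe-audiolevel element with
 * test pads attached to its source pads, feeds it from an audio and a video
 * thread, and finally checks the buffer and message counts and that the
 * message running-times match the recorded video frame times. */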
static void
test_videoframe_audiolevel_generic (void)
{
  GstElement *alevel;
  GstPad *asink, *vsink, *asrc, *vsrc, *aoutput_sink, *voutput_sink;
  GThread *athread, *vthread;
  GstBus *bus;
  guint i;

  got_eos = FALSE;
  audio_buffer_count = 0;
  video_buffer_count = 0;
  num_msgs = 0;

  g_queue_init (&v_timestamp_q);
  g_queue_init (&msg_timestamp_q);

  alevel = gst_element_factory_make ("videoframe-audiolevel", NULL);
  fail_unless (alevel != NULL);

  bus = gst_bus_new ();
  gst_element_set_bus (alevel, bus);
  gst_bus_set_sync_handler (bus, on_message, NULL, NULL);

  asink = gst_element_get_static_pad (alevel, "asink");
  fail_unless (asink != NULL);

  vsink = gst_element_get_static_pad (alevel, "vsink");
  fail_unless (vsink != NULL);

  asrc = gst_element_get_static_pad (alevel, "asrc");
  aoutput_sink = gst_pad_new ("sink", GST_PAD_SINK);
  fail_unless (aoutput_sink != NULL);
  fail_unless (gst_pad_link (asrc, aoutput_sink) == GST_PAD_LINK_OK);

  vsrc = gst_element_get_static_pad (alevel, "vsrc");
  voutput_sink = gst_pad_new ("sink", GST_PAD_SINK);
  fail_unless (voutput_sink != NULL);
  fail_unless (gst_pad_link (vsrc, voutput_sink) == GST_PAD_LINK_OK);

  gst_pad_set_chain_function (aoutput_sink, output_achain);
  gst_pad_set_event_function (aoutput_sink, output_aevent);

  gst_pad_set_chain_function (voutput_sink, output_vchain);
  gst_pad_set_event_function (voutput_sink, output_vevent);

  gst_pad_set_active (aoutput_sink, TRUE);
  gst_pad_set_active (voutput_sink, TRUE);
  fail_unless (gst_element_set_state (alevel,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS);

  athread = g_thread_new ("athread", (GThreadFunc) push_abuffers, asink);
  vthread = g_thread_new ("vthread", (GThreadFunc) push_vbuffers, vsink);

  g_thread_join (vthread);
  g_thread_join (athread);

  fail_unless (got_eos);
  fail_unless_equals_int (audio_buffer_count, n_abuffers);
  fail_unless_equals_int (video_buffer_count, n_vbuffers);
  if (!long_video)
    fail_unless_equals_int (num_msgs, n_vbuffers);

  fail_unless_equals_int (g_queue_get_length (&v_timestamp_q), n_vbuffers);
  /* num_msgs is equal to n_vbuffers except in the case of long_video */
  fail_unless_equals_int (g_queue_get_length (&msg_timestamp_q), num_msgs);

  for (i = 0; i < g_queue_get_length (&msg_timestamp_q); i++) {
    GstClockTime *vt = g_queue_pop_head (&v_timestamp_q);
    GstClockTime *mt = g_queue_pop_head (&msg_timestamp_q);
    fail_unless (vt != NULL);
    fail_unless (mt != NULL);
    if (!video_gaps && !video_overlaps && !early_video)
      fail_unless_equals_uint64 (*vt, *mt);
    g_free (vt);
    g_free (mt);
  }

  /* teardown */
  gst_element_set_state (alevel, GST_STATE_NULL);
  gst_bus_set_flushing (bus, TRUE);
  gst_object_unref (bus);
  g_queue_foreach (&v_timestamp_q, (GFunc) g_free, NULL);
  g_queue_foreach (&msg_timestamp_q, (GFunc) g_free, NULL);
  g_queue_clear (&v_timestamp_q);
  g_queue_clear (&msg_timestamp_q);
  gst_pad_unlink (asrc, aoutput_sink);
  gst_object_unref (asrc);
  gst_pad_unlink (vsrc, voutput_sink);
  gst_object_unref (vsrc);
  gst_object_unref (asink);
  gst_object_unref (vsink);
  gst_pad_set_active (aoutput_sink, FALSE);
  gst_object_unref (aoutput_sink);
  gst_pad_set_active (voutput_sink, FALSE);
  gst_object_unref (voutput_sink);
  gst_object_unref (alevel);
}

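/* The individual test cases below only adjust the parameters and then run
 * the generic test body. */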
GST_START_TEST (test_videoframe_audiolevel_16chan_1)
{
  set_default_params ();
  channels = 16;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_8chan_1)
{
  set_default_params ();
  channels = 8;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_2chan_1)
{
  set_default_params ();
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_1chan_1)
{
  set_default_params ();
  channels = 1;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_16chan_0)
{
  set_default_params ();
  channels = 16;
  expected_rms = 0;
  fill_value = 0;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_8chan_0)
{
  set_default_params ();
  channels = 8;
  expected_rms = 0;
  fill_value = 0;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_2chan_0)
{
  set_default_params ();
  channels = 2;
  expected_rms = 0;
  fill_value = 0;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_1chan_0)
{
  set_default_params ();
  channels = 1;
  expected_rms = 0;
  fill_value = 0;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_adelay)
{
  set_default_params ();
  audiodelay = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_vdelay)
{
  set_default_params ();
  videodelay = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_per_channel)
{
  set_default_params ();
  per_channel = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_long_video)
{
  set_default_params ();
  n_abuffers = 6;
  n_vbuffers = 255;
  long_video = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_video_gaps)
{
  set_default_params ();
  video_gaps = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_video_overlaps)
{
  set_default_params ();
  video_overlaps = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_audio_nondiscont)
{
  set_default_params ();
  audio_nondiscont = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_audio_drift)
{
  set_default_params ();
  audio_drift = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_early_video)
{
  set_default_params ();
  early_video = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;

GST_START_TEST (test_videoframe_audiolevel_late_video)
{
  set_default_params ();
  late_video = TRUE;
  test_videoframe_audiolevel_generic ();
}

GST_END_TEST;


static Suite *
videoframe_audiolevel_suite (void)
{
  Suite *s = suite_create ("videoframe-audiolevel");
  TCase *tc_chain;

  tc_chain = tcase_create ("videoframe-audiolevel");
  tcase_add_test (tc_chain, test_videoframe_audiolevel_16chan_1);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_8chan_1);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_2chan_1);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_1chan_1);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_16chan_0);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_8chan_0);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_2chan_0);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_1chan_0);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_adelay);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_vdelay);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_per_channel);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_long_video);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_video_gaps);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_video_overlaps);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_audio_nondiscont);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_audio_drift);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_early_video);
  tcase_add_test (tc_chain, test_videoframe_audiolevel_late_video);
  suite_add_tcase (s, tc_chain);

  return s;
}

GST_CHECK_MAIN (videoframe_audiolevel);