// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <sstream>

#include "base/basictypes.h"
#include "base/logging.h"
#include "media/base/stream_parser.h"
#include "media/base/stream_parser_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace media {

typedef StreamParser::TrackId TrackId;
typedef StreamParser::BufferQueue BufferQueue;
typedef StreamParser::TextBufferQueueMap TextBufferQueueMap;

const int kEnd = -1;
const uint8 kFakeData[] = { 0xFF };
const TrackId kAudioTrackId = 0;
const TrackId kVideoTrackId = 1;
const TrackId kTextTrackIdA = 2;
const TrackId kTextTrackIdB = 3;

static bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::AUDIO;
}

static bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::VIDEO;
}

static bool IsText(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::TEXT;
}

// Creates and appends a sequence of StreamParserBuffers to the provided
// |queue|. |decode_timestamps| determines the number of appended buffers and
// their sequence of decode timestamps; a |kEnd| timestamp indicates the
// end of the sequence and no buffer is appended for it. Each new buffer's
// type will be |type| with track ID set to |track_id|.
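// For example, given |decode_timestamps| = { 100, 110, kEnd }, two buffers
// with decode timestamps 100us and 110us are appended to |queue|, each
// carrying the one-byte |kFakeData| payload.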
static void GenerateBuffers(const int* decode_timestamps,
                            StreamParserBuffer::Type type,
                            TrackId track_id,
                            BufferQueue* queue) {
  DCHECK(decode_timestamps);
  DCHECK(queue);
  DCHECK_NE(type, DemuxerStream::UNKNOWN);
  DCHECK_LT(type, DemuxerStream::NUM_TYPES);
  for (int i = 0; decode_timestamps[i] != kEnd; ++i) {
    scoped_refptr<StreamParserBuffer> buffer =
        StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData),
                                     true, type, track_id);
    buffer->SetDecodeTimestamp(
        DecodeTimestamp::FromMicroseconds(decode_timestamps[i]));
    queue->push_back(buffer);
  }
}

class StreamParserTest : public testing::Test {
 protected:
  StreamParserTest() {}

  // Returns the number of buffers in |merged_buffers_| for which |predicate|
  // returns true.
  size_t CountMatchingMergedBuffers(
      bool (*predicate)(scoped_refptr<StreamParserBuffer> buffer)) {
    return static_cast<size_t>(std::count_if(merged_buffers_.begin(),
                                             merged_buffers_.end(),
                                             predicate));
  }

  // Appends test audio buffers in the sequence described by
  // |decode_timestamps| to |audio_buffers_|. See GenerateBuffers() for the
  // |decode_timestamps| format.
  void GenerateAudioBuffers(const int* decode_timestamps) {
    GenerateBuffers(decode_timestamps, DemuxerStream::AUDIO, kAudioTrackId,
                    &audio_buffers_);
  }

  // Appends test video buffers in the sequence described by
  // |decode_timestamps| to |video_buffers_|. See GenerateBuffers() for the
  // |decode_timestamps| format.
  void GenerateVideoBuffers(const int* decode_timestamps) {
    GenerateBuffers(decode_timestamps, DemuxerStream::VIDEO, kVideoTrackId,
                    &video_buffers_);
  }

  // Current tests only need up to two distinct text BufferQueues. This helper
  // conditionally appends buffers to the underlying |text_buffers_a_| and
  // |text_buffers_b_| and conditionally inserts these BufferQueues into
  // |text_map_| keyed by the respective track ID. If |decode_timestamps_{a,b}|
  // is NULL, then the corresponding BufferQueue is neither appended to nor
  // inserted into |text_map_| (though it may previously have been inserted).
  // Note that key collision on map insertion does not replace the previous
  // value.
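  // For example, GenerateTextBuffers(timestamps_a, NULL) appends buffers only
  // to |text_buffers_a_| and inserts only |kTextTrackIdA|'s queue into
  // |text_map_|.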
  void GenerateTextBuffers(const int* decode_timestamps_a,
                           const int* decode_timestamps_b) {
    if (decode_timestamps_a) {
      GenerateBuffers(decode_timestamps_a, DemuxerStream::TEXT, kTextTrackIdA,
                      &text_buffers_a_);
      text_map_.insert(std::make_pair(kTextTrackIdA, text_buffers_a_));
    }

    if (decode_timestamps_b) {
      GenerateBuffers(decode_timestamps_b, DemuxerStream::TEXT, kTextTrackIdB,
                      &text_buffers_b_);
      text_map_.insert(std::make_pair(kTextTrackIdB, text_buffers_b_));
    }
  }

  // Returns a string that describes the sequence of buffers in
  // |merged_buffers_|. The string is a concatenation of space-delimited buffer
  // descriptors in the same sequence as |merged_buffers_|. Each descriptor is
  // the concatenation of
  // 1) a single character that describes the buffer's type(), e.g. A, V, or T
  //    for audio, video, or text, respectively
  // 2) the buffer's track_id()
  // 3) ":"
  // 4) the buffer's decode timestamp.
  // If |include_type_and_text_track| is false, then items 1, 2, and 3 are
  // not included in descriptors. This is useful when buffers with different
  // media types but the same decode timestamp are expected, and the exact
  // sequence of media types for the tied timestamps is not subject to
  // verification.
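  // For example, an audio buffer on track 0 with a decode timestamp of 100us
  // is described as "A0:100"; with |include_type_and_text_track| false, the
  // same buffer is described as just "100".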
  std::string MergedBufferQueueString(bool include_type_and_text_track) {
    std::stringstream results_stream;
    for (BufferQueue::const_iterator itr = merged_buffers_.begin();
         itr != merged_buffers_.end();
         ++itr) {
      if (itr != merged_buffers_.begin())
        results_stream << " ";
      const StreamParserBuffer& buffer = *(itr->get());
      if (include_type_and_text_track) {
        switch (buffer.type()) {
          case DemuxerStream::AUDIO:
            results_stream << "A";
            break;
          case DemuxerStream::VIDEO:
            results_stream << "V";
            break;
          case DemuxerStream::TEXT:
            results_stream << "T";
            break;
          default:
            NOTREACHED();
        }
        results_stream << buffer.track_id() << ":";
      }
      results_stream << buffer.GetDecodeTimestamp().InMicroseconds();
    }

    return results_stream.str();
  }

  // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
  // |video_buffers_|, |text_map_|, and |merged_buffers_| returns true and
  // results in an updated |merged_buffers_| that matches expectation. The
  // expectation, specified in |expected|, is compared to the string resulting
  // from MergedBufferQueueString() (see comments for that method) with
  // |verify_type_and_text_track_sequence| passed. |merged_buffers_| is
  // appended to by the merge, and may be set up by the caller to have some
  // pre-existing buffers; it is both an input and output of this method.
  // Regardless of |verify_type_and_text_track_sequence|, the number of buffers
  // of each type (audio, video, text) added by the merge is also verified to
  // match the number of buffers in |audio_buffers_|, |video_buffers_|, and
  // |text_map_|, respectively.
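  // For example, with |audio_buffers_| holding decode timestamps {100, 103},
  // |video_buffers_| holding {101}, an empty |text_map_|, and an empty
  // |merged_buffers_|, a successful merge is verified by:
  //   VerifyMergeSuccess("A0:100 V1:101 A0:103", true);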
  void VerifyMergeSuccess(const std::string& expected,
                          bool verify_type_and_text_track_sequence) {
    // |merged_buffers_| may already have some buffers. Count them by type for
    // later inclusion in verification.
    size_t original_audio_in_merged = CountMatchingMergedBuffers(IsAudio);
    size_t original_video_in_merged = CountMatchingMergedBuffers(IsVideo);
    size_t original_text_in_merged = CountMatchingMergedBuffers(IsText);

    EXPECT_TRUE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
                                  &merged_buffers_));

    // Verify that the resulting contents of |merged_buffers_| match
    // |expected|.
    EXPECT_EQ(expected,
              MergedBufferQueueString(verify_type_and_text_track_sequence));

    // Verify that the correct number of each type of buffer is in the merge
    // result.
    size_t audio_in_merged = CountMatchingMergedBuffers(IsAudio);
    size_t video_in_merged = CountMatchingMergedBuffers(IsVideo);
    size_t text_in_merged = CountMatchingMergedBuffers(IsText);

    EXPECT_GE(audio_in_merged, original_audio_in_merged);
    EXPECT_GE(video_in_merged, original_video_in_merged);
    EXPECT_GE(text_in_merged, original_text_in_merged);

    EXPECT_EQ(audio_buffers_.size(),
              audio_in_merged - original_audio_in_merged);
    EXPECT_EQ(video_buffers_.size(),
              video_in_merged - original_video_in_merged);

    size_t expected_text_buffer_count = 0;
    for (TextBufferQueueMap::const_iterator itr = text_map_.begin();
         itr != text_map_.end();
         ++itr) {
      expected_text_buffer_count += itr->second.size();
    }
    EXPECT_EQ(expected_text_buffer_count,
              text_in_merged - original_text_in_merged);
  }

  // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
  // |video_buffers_|, |text_map_|, and |merged_buffers_| returns false.
  void VerifyMergeFailure() {
    EXPECT_FALSE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
                                   &merged_buffers_));
  }

  // Helper to allow tests to clear all the input BufferQueues (except
  // |merged_buffers_|) and the TextBufferQueueMap that are used in
  // VerifyMerge{Success/Failure}().
  void ClearQueuesAndTextMapButKeepAnyMergedBuffers() {
    audio_buffers_.clear();
    video_buffers_.clear();
    text_buffers_a_.clear();
    text_buffers_b_.clear();
    text_map_.clear();
  }

 private:
  BufferQueue audio_buffers_;
  BufferQueue video_buffers_;
  BufferQueue text_buffers_a_;
  BufferQueue text_buffers_b_;
  BufferQueue merged_buffers_;
  TextBufferQueueMap text_map_;

  DISALLOW_COPY_AND_ASSIGN(StreamParserTest);
};

TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) {
  std::string expected = "";
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) {
  std::string expected = "A0:100";
  int audio_timestamps[] = { 100, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) {
  std::string expected = "V1:100";
  int video_timestamps[] = { 100, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) {
  std::string expected = "T2:100";
  int text_timestamps[] = { 100, kEnd };
  GenerateTextBuffers(text_timestamps, NULL);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) {
  std::string expected = "A0:100 V1:101 V1:102 A0:103 A0:104 V1:105";
  int audio_timestamps[] = { 100, 103, 104, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 102, 105, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) {
  std::string expected = "T2:100 T2:101 T3:103 T2:104 T3:105 T3:106";
  int text_timestamps_a[] = { 100, 101, 104, kEnd };
  int text_timestamps_b[] = { 103, 105, 106, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) {
  std::string expected = "A0:100 A0:100 A0:100 V1:101 V1:101 V1:101 A0:102 "
                         "V1:103 V1:103";
  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) {
  // The interface leaves it undefined which stream's buffer wins the selection
  // when decode timestamps are tied. Verify at least that the right number of
  // each kind of buffer results, and that buffers are in nondecreasing order.
  std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102";
  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps[] = { 100, 102, 102, 102, kEnd };
  GenerateTextBuffers(text_timestamps, NULL);
  VerifyMergeSuccess(expected, false);
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) {
  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  VerifyMergeFailure();
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) {
  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 104, 100, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeFailure();
}

TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);

  ClearQueuesAndTextMapButKeepAnyMergedBuffers();

  expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 T2:107 "
             "A0:107 V1:111 T2:112 V1:113 T3:114 A0:115 V1:116 T2:117";
  int more_audio_timestamps[] = { 107, 115, kEnd };
  GenerateAudioBuffers(more_audio_timestamps);
  int more_video_timestamps[] = { 111, 113, 116, kEnd };
  GenerateVideoBuffers(more_video_timestamps);
  int more_text_timestamps_a[] = { 112, 117, kEnd };
  int more_text_timestamps_b[] = { 114, kEnd };
  GenerateTextBuffers(more_text_timestamps_a, more_text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);

  // Merging empty queues into a pre-existing merge result should succeed and
  // leave the existing result unchanged.
  ClearQueuesAndTextMapButKeepAnyMergedBuffers();
  VerifyMergeSuccess(expected, true);

  // But appending something with a lower timestamp than the last timestamp
  // in the pre-existing merge result should fail.
  int more_audio_timestamps[] = { 106, kEnd };
  GenerateAudioBuffers(more_audio_timestamps);
  VerifyMergeFailure();
}

}  // namespace media