1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <algorithm>
6
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::_;
32
33 namespace media {
34
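// Note: the 8-byte size field below (starting at kTracksSizeOffset) is only a
// placeholder; CreateInitSegmentInternal() overwrites it with the actual
// Tracks payload size via WriteInt64().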
35 const uint8 kTracksHeader[] = {
36 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
37 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
38 };
39
40 // WebM Block bytes that represent a VP8 keyframe.
41 const uint8 kVP8Keyframe[] = {
42 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
43 };
44
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
47
48 static const uint8 kCuesHeader[] = {
49 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
50 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
51 };
52
53 const int kTracksHeaderSize = sizeof(kTracksHeader);
54 const int kTracksSizeOffset = 4;
55
56 // The size field of the TrackEntry element in the test file
57 // "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
58 const int kAudioTrackSizeOffset = 1;
59 const int kAudioTrackSizeWidth = 8;
60 const int kAudioTrackEntryHeaderSize =
61 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
62
63 // The size field of the TrackEntry element in the test file
64 // "webm_vp8_track_entry" starts at index 1 and spans 8 bytes.
65 const int kVideoTrackSizeOffset = 1;
66 const int kVideoTrackSizeWidth = 8;
67 const int kVideoTrackEntryHeaderSize =
68 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
69
70 const int kVideoTrackNum = 1;
71 const int kAudioTrackNum = 2;
72 const int kTextTrackNum = 3;
73 const int kAlternateTextTrackNum = 4;
74
75 const int kAudioBlockDuration = 23;
76 const int kVideoBlockDuration = 33;
77 const int kTextBlockDuration = 100;
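// Size in bytes of the dummy block payloads written by the cluster
// generation helpers below.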
78 const int kBlockSize = 10;
79
80 const char kSourceId[] = "SourceId";
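// kDefaultFirstCluster() produces audio blocks ending at 46 ms and video
// blocks ending at 66 ms; kDefaultSecondCluster() continues the video stream
// up to 132 ms. The constants below capture those boundaries.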
81 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
82 const int kDefaultFirstClusterEndTimestamp = 66;
83 const int kDefaultSecondClusterEndTimestamp = 132;
84
85 base::TimeDelta kDefaultDuration() {
86 return base::TimeDelta::FromMilliseconds(201224);
87 }
88
89 // Writes an integer into |buffer| as a vint that spans 8 bytes.
90 // The data pointed to by |buffer| must be at least 8 bytes long.
91 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
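// For example, WriteInt64(buffer, 0x1234) produces the bytes
// 0x01 0x00 0x00 0x00 0x00 0x00 0x12 0x34.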
92 static void WriteInt64(uint8* buffer, int64 number) {
93 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
94 buffer[0] = 0x01;
95 int64 tmp = number;
96 for (int i = 7; i > 0; i--) {
97 buffer[i] = tmp & 0xff;
98 tmp >>= 8;
99 }
100 }
101
102 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
103 return arg.get() && !arg->end_of_stream() &&
104 arg->timestamp().InMilliseconds() == timestamp_in_ms;
105 }
106
107 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
108
109 static void OnReadDone(const base::TimeDelta& expected_time,
110 bool* called,
111 DemuxerStream::Status status,
112 const scoped_refptr<DecoderBuffer>& buffer) {
113 EXPECT_EQ(status, DemuxerStream::kOk);
114 EXPECT_EQ(expected_time, buffer->timestamp());
115 *called = true;
116 }
117
118 static void OnReadDone_AbortExpected(
119 bool* called, DemuxerStream::Status status,
120 const scoped_refptr<DecoderBuffer>& buffer) {
121 EXPECT_EQ(status, DemuxerStream::kAborted);
122 EXPECT_EQ(NULL, buffer.get());
123 *called = true;
124 }
125
126 static void OnReadDone_EOSExpected(bool* called,
127 DemuxerStream::Status status,
128 const scoped_refptr<DecoderBuffer>& buffer) {
129 EXPECT_EQ(status, DemuxerStream::kOk);
130 EXPECT_TRUE(buffer->end_of_stream());
131 *called = true;
132 }
133
134 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
135 EXPECT_EQ(status, PIPELINE_OK);
136 *called = true;
137 }
138
139 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
140
141 class ChunkDemuxerTest : public ::testing::Test {
142 protected:
143 enum CodecsIndex {
144 AUDIO,
145 VIDEO,
146 MAX_CODECS_INDEX
147 };
148
149 // Default cluster to append first for simple tests.
150 scoped_ptr<Cluster> kDefaultFirstCluster() {
151 return GenerateCluster(0, 4);
152 }
153
154 // Default cluster to append after kDefaultFirstCluster()
155 // has been appended. This cluster starts with blocks that
156 // have timestamps consistent with the end times of the blocks
157 // in kDefaultFirstCluster() so that these two clusters represent
158 // a continuous region.
159 scoped_ptr<Cluster> kDefaultSecondCluster() {
160 return GenerateCluster(46, 66, 5);
161 }
162
163 ChunkDemuxerTest()
164 : append_window_end_for_next_append_(kInfiniteDuration()) {
165 CreateNewDemuxer();
166 }
167
168 void CreateNewDemuxer() {
169 base::Closure open_cb =
170 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
171 Demuxer::NeedKeyCB need_key_cb =
172 base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
173 demuxer_.reset(
174 new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
175 }
176
177 virtual ~ChunkDemuxerTest() {
178 ShutdownDemuxer();
179 }
180
181 void CreateInitSegment(int stream_flags,
182 bool is_audio_encrypted,
183 bool is_video_encrypted,
184 scoped_ptr<uint8[]>* buffer,
185 int* size) {
186 CreateInitSegmentInternal(
187 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
188 size);
189 }
190
191 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
192 bool is_audio_encrypted,
193 bool is_video_encrypted,
194 scoped_ptr<uint8[]>* buffer,
195 int* size) {
196 DCHECK(stream_flags & HAS_TEXT);
197 CreateInitSegmentInternal(
198 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
199 size);
200 }
201
202 void CreateInitSegmentInternal(int stream_flags,
203 bool is_audio_encrypted,
204 bool is_video_encrypted,
205 scoped_ptr<uint8[]>* buffer,
206 bool use_alternate_text_track_id,
207 int* size) {
208 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
209 bool has_video = (stream_flags & HAS_VIDEO) != 0;
210 bool has_text = (stream_flags & HAS_TEXT) != 0;
211 scoped_refptr<DecoderBuffer> ebml_header;
212 scoped_refptr<DecoderBuffer> info;
213 scoped_refptr<DecoderBuffer> audio_track_entry;
214 scoped_refptr<DecoderBuffer> video_track_entry;
215 scoped_refptr<DecoderBuffer> audio_content_encodings;
216 scoped_refptr<DecoderBuffer> video_content_encodings;
217 scoped_refptr<DecoderBuffer> text_track_entry;
218
219 ebml_header = ReadTestDataFile("webm_ebml_element");
220
221 info = ReadTestDataFile("webm_info_element");
222
223 int tracks_element_size = 0;
224
225 if (has_audio) {
226 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
227 tracks_element_size += audio_track_entry->data_size();
228 if (is_audio_encrypted) {
229 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
230 tracks_element_size += audio_content_encodings->data_size();
231 }
232 }
233
234 if (has_video) {
235 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
236 tracks_element_size += video_track_entry->data_size();
237 if (is_video_encrypted) {
238 video_content_encodings = ReadTestDataFile("webm_content_encodings");
239 tracks_element_size += video_content_encodings->data_size();
240 }
241 }
242
243 if (has_text) {
244 // TODO(matthewjheaney): create an abstraction to do
245 // this (http://crbug/321454).
246 // We need it to also handle the creation of multiple text tracks.
247 //
248 // This is the track entry for a text track,
249 // TrackEntry [AE], size=30
250 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
251 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
252 // track, even if TrackNum changes)
253 // TrackType [83], size=1, val=0x11
254 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
255 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
256 "\x83\x81\x11\x86\x92"
257 "D_WEBVTT/SUBTITLES";
258 DCHECK_EQ(str[4], kTextTrackNum);
259 if (use_alternate_text_track_id)
260 str[4] = kAlternateTextTrackNum;
261
262 const int len = strlen(str);
263 DCHECK_EQ(len, 32);
264 const uint8* const buf = reinterpret_cast<const uint8*>(str);
265 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
266 tracks_element_size += text_track_entry->data_size();
267 }
268
269 *size = ebml_header->data_size() + info->data_size() +
270 kTracksHeaderSize + tracks_element_size;
271
272 buffer->reset(new uint8[*size]);
273
274 uint8* buf = buffer->get();
275 memcpy(buf, ebml_header->data(), ebml_header->data_size());
276 buf += ebml_header->data_size();
277
278 memcpy(buf, info->data(), info->data_size());
279 buf += info->data_size();
280
281 memcpy(buf, kTracksHeader, kTracksHeaderSize);
282 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
283 buf += kTracksHeaderSize;
284
285 // TODO(xhwang): Simplify this! Probably have test data files that contain
286 // ContentEncodings directly instead of trying to create one at run-time.
287 if (has_audio) {
288 memcpy(buf, audio_track_entry->data(),
289 audio_track_entry->data_size());
290 if (is_audio_encrypted) {
291 memcpy(buf + audio_track_entry->data_size(),
292 audio_content_encodings->data(),
293 audio_content_encodings->data_size());
294 WriteInt64(buf + kAudioTrackSizeOffset,
295 audio_track_entry->data_size() +
296 audio_content_encodings->data_size() -
297 kAudioTrackEntryHeaderSize);
298 buf += audio_content_encodings->data_size();
299 }
300 buf += audio_track_entry->data_size();
301 }
302
303 if (has_video) {
304 memcpy(buf, video_track_entry->data(),
305 video_track_entry->data_size());
306 if (is_video_encrypted) {
307 memcpy(buf + video_track_entry->data_size(),
308 video_content_encodings->data(),
309 video_content_encodings->data_size());
310 WriteInt64(buf + kVideoTrackSizeOffset,
311 video_track_entry->data_size() +
312 video_content_encodings->data_size() -
313 kVideoTrackEntryHeaderSize);
314 buf += video_content_encodings->data_size();
315 }
316 buf += video_track_entry->data_size();
317 }
318
319 if (has_text) {
320 memcpy(buf, text_track_entry->data(),
321 text_track_entry->data_size());
322 buf += text_track_entry->data_size();
323 }
324 }
325
326 ChunkDemuxer::Status AddId() {
327 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
328 }
329
330 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
331 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
332 bool has_video = (stream_flags & HAS_VIDEO) != 0;
333 std::vector<std::string> codecs;
334 std::string type;
335
336 if (has_audio) {
337 codecs.push_back("vorbis");
338 type = "audio/webm";
339 }
340
341 if (has_video) {
342 codecs.push_back("vp8");
343 type = "video/webm";
344 }
345
346 if (!has_audio && !has_video) {
347 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
348 }
349
350 return demuxer_->AddId(source_id, type, codecs);
351 }
352
353 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
354 std::vector<std::string> codecs;
355 std::string type = "video/mp2t";
356 codecs.push_back("mp4a.40.2");
357 codecs.push_back("avc1.640028");
358 return demuxer_->AddId(source_id, type, codecs);
359 }
360
361 void AppendData(const uint8* data, size_t length) {
362 AppendData(kSourceId, data, length);
363 }
364
365 void AppendCluster(const std::string& source_id,
366 scoped_ptr<Cluster> cluster) {
367 AppendData(source_id, cluster->data(), cluster->size());
368 }
369
370 void AppendCluster(scoped_ptr<Cluster> cluster) {
371 AppendCluster(kSourceId, cluster.Pass());
372 }
373
374 void AppendCluster(int timecode, int block_count) {
375 AppendCluster(GenerateCluster(timecode, block_count));
376 }
377
378 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
379 int timecode, int block_count) {
380 int block_duration = 0;
381 switch (track_number) {
382 case kVideoTrackNum:
383 block_duration = kVideoBlockDuration;
384 break;
385 case kAudioTrackNum:
386 block_duration = kAudioBlockDuration;
387 break;
388 case kTextTrackNum: // Fall-through.
389 case kAlternateTextTrackNum:
390 block_duration = kTextBlockDuration;
391 break;
392 }
393 ASSERT_NE(block_duration, 0);
394 int end_timecode = timecode + block_count * block_duration;
395 AppendCluster(source_id,
396 GenerateSingleStreamCluster(
397 timecode, end_timecode, track_number, block_duration));
398 }
399
400 // |cluster_description| - A space delimited string of buffer info that
401 // is used to construct a cluster. Each buffer info is a timestamp in
402 // milliseconds, optionally followed by a 'K' to indicate that a buffer
403 // should be marked as a keyframe. For example, "0K 30 60" should construct
404 // a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
405 // at 30ms and 60ms.
406 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
407 const std::string& cluster_description) {
408 std::vector<std::string> timestamps;
409 base::SplitString(cluster_description, ' ', &timestamps);
410
411 ClusterBuilder cb;
412 std::vector<uint8> data(10);
413 for (size_t i = 0; i < timestamps.size(); ++i) {
414 std::string timestamp_str = timestamps[i];
415 int block_flags = 0;
416 if (EndsWith(timestamp_str, "K", true)) {
417 block_flags = kWebMFlagKeyframe;
418 // Remove the "K" off of the token.
419 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
420 }
421 int timestamp_in_ms;
422 CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
423
424 if (i == 0)
425 cb.SetClusterTimecode(timestamp_in_ms);
426
427 if (track_number == kTextTrackNum ||
428 track_number == kAlternateTextTrackNum) {
429 cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
430 block_flags, &data[0], data.size());
431 } else {
432 cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
433 &data[0], data.size());
434 }
435 }
436 AppendCluster(source_id, cb.Finish());
437 }
438
439 void AppendData(const std::string& source_id,
440 const uint8* data, size_t length) {
441 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
442
443 demuxer_->AppendData(source_id, data, length,
444 append_window_start_for_next_append_,
445 append_window_end_for_next_append_,
446 &timestamp_offset_map_[source_id]);
447 }
448
449 void AppendDataInPieces(const uint8* data, size_t length) {
450 AppendDataInPieces(data, length, 7);
451 }
452
453 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
454 const uint8* start = data;
455 const uint8* end = data + length;
456 while (start < end) {
457 size_t append_size = std::min(piece_size,
458 static_cast<size_t>(end - start));
459 AppendData(start, append_size);
460 start += append_size;
461 }
462 }
463
464 void AppendInitSegment(int stream_flags) {
465 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
466 }
467
468 void AppendInitSegmentWithSourceId(const std::string& source_id,
469 int stream_flags) {
470 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
471 }
472
473 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
474 int stream_flags,
475 bool is_audio_encrypted,
476 bool is_video_encrypted) {
477 scoped_ptr<uint8[]> info_tracks;
478 int info_tracks_size = 0;
479 CreateInitSegment(stream_flags,
480 is_audio_encrypted, is_video_encrypted,
481 &info_tracks, &info_tracks_size);
482 AppendData(source_id, info_tracks.get(), info_tracks_size);
483 }
484
485 void AppendGarbage() {
486 // Fill up an array with gibberish.
487 int garbage_cluster_size = 10;
488 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
489 for (int i = 0; i < garbage_cluster_size; ++i)
490 garbage_cluster[i] = i;
491 AppendData(garbage_cluster.get(), garbage_cluster_size);
492 }
493
494 void InitDoneCalled(PipelineStatus expected_status,
495 PipelineStatus status) {
496 EXPECT_EQ(status, expected_status);
497 }
498
499 void AppendEmptyCluster(int timecode) {
500 AppendCluster(GenerateEmptyCluster(timecode));
501 }
502
503 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
504 PipelineStatus expected_status) {
505 if (expected_duration != kNoTimestamp())
506 EXPECT_CALL(host_, SetDuration(expected_duration));
507 return CreateInitDoneCB(expected_status);
508 }
509
510 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
511 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
512 base::Unretained(this),
513 expected_status);
514 }
515
516 enum StreamFlags {
517 HAS_AUDIO = 1 << 0,
518 HAS_VIDEO = 1 << 1,
519 HAS_TEXT = 1 << 2
520 };
521
522 bool InitDemuxer(int stream_flags) {
523 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
524 }
525
526 bool InitDemuxerWithEncryptionInfo(
527 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
528
529 PipelineStatus expected_status =
530 (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
531
532 base::TimeDelta expected_duration = kNoTimestamp();
533 if (expected_status == PIPELINE_OK)
534 expected_duration = kDefaultDuration();
535
536 EXPECT_CALL(*this, DemuxerOpened());
537 demuxer_->Initialize(
538 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
539
540 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
541 return false;
542
543 AppendInitSegmentWithEncryptedInfo(
544 kSourceId, stream_flags,
545 is_audio_encrypted, is_video_encrypted);
546 return true;
547 }
548
549 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
550 const std::string& video_id,
551 bool has_text) {
552 EXPECT_CALL(*this, DemuxerOpened());
553 demuxer_->Initialize(
554 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
555
556 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
557 return false;
558 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
559 return false;
560
561 int audio_flags = HAS_AUDIO;
562 int video_flags = HAS_VIDEO;
563
564 if (has_text) {
565 audio_flags |= HAS_TEXT;
566 video_flags |= HAS_TEXT;
567 }
568
569 AppendInitSegmentWithSourceId(audio_id, audio_flags);
570 AppendInitSegmentWithSourceId(video_id, video_flags);
571 return true;
572 }
573
574 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
575 const std::string& video_id) {
576 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
577 }
578
579 // Initializes the demuxer with data from 2 files with different
580 // decoder configurations. This is used to test the decoder config change
581 // logic.
582 //
583 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
584 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
585 // The resulting video stream returns data from each file for the following
586 // time ranges.
587 // bear-320x240.webm : [0-501) [801-2736)
588 // bear-640x360.webm : [527-793)
589 //
590 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
591 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
592 // The resulting audio stream returns data from each file for the following
593 // time ranges.
594 // bear-320x240.webm : [0-524) [779-2736)
595 // bear-640x360.webm : [527-759)
596 bool InitDemuxerWithConfigChangeData() {
597 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
598 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
599
600 EXPECT_CALL(*this, DemuxerOpened());
601
602 demuxer_->Initialize(
603 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
604 PIPELINE_OK), true);
605
606 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
607 return false;
608
609 // Append the whole bear1 file.
610 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
611 // the files are fixed to have the correct duration in their init segments,
612 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
613 // See http://crbug.com/354284.
614 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
615 AppendData(bear1->data(), bear1->data_size());
616 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
617 // seen so far for audio track).
618 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
619 // DefaultDuration for video track).
620 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
621
622 // Append initialization segment for bear2.
623 // Note: Offsets here and below are derived from
624 // media/test/data/bear-640x360-manifest.js and
625 // media/test/data/bear-320x240-manifest.js which were
626 // generated from media/test/data/bear-640x360.webm and
627 // media/test/data/bear-320x240.webm respectively.
628 AppendData(bear2->data(), 4340);
629
630 // Append a media segment that goes from [0.527000, 1.014000).
631 AppendData(bear2->data() + 55290, 18785);
632 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
633
634 // Append initialization segment for bear1 & fill gap with [779-1197)
635 // segment.
636 AppendData(bear1->data(), 4370);
637 AppendData(bear1->data() + 72737, 28183);
638 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
639
640 MarkEndOfStream(PIPELINE_OK);
641 return true;
642 }
643
644 void ShutdownDemuxer() {
645 if (demuxer_) {
646 demuxer_->Shutdown();
647 message_loop_.RunUntilIdle();
648 }
649 }
650
651 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
652 uint8 data[] = { 0x00 };
653 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
654 }
655
656 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
657 return GenerateCluster(timecode, timecode, block_count);
658 }
659
660 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
661 int duration, int flags) {
662 const uint8* data =
663 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
664 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
665 sizeof(kVP8Interframe);
666 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
667 }
668
669 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
670 int first_video_timecode,
671 int block_count) {
672 return GenerateCluster(first_audio_timecode, first_video_timecode,
673 block_count, false);
674 }
675 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
676 int first_video_timecode,
677 int block_count,
678 bool unknown_size) {
679 CHECK_GT(block_count, 0);
680
681 int size = 10;
682 scoped_ptr<uint8[]> data(new uint8[size]);
683
684 ClusterBuilder cb;
685 cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
686
687 if (block_count == 1) {
688 cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
689 kAudioBlockDuration, kWebMFlagKeyframe,
690 data.get(), size);
691 return cb.Finish();
692 }
693
694 int audio_timecode = first_audio_timecode;
695 int video_timecode = first_video_timecode;
696
697 // Create simple blocks for everything except the last 2 blocks.
698 // The first video frame must be a keyframe.
699 uint8 video_flag = kWebMFlagKeyframe;
700 for (int i = 0; i < block_count - 2; i++) {
701 if (audio_timecode <= video_timecode) {
702 cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
703 data.get(), size);
704 audio_timecode += kAudioBlockDuration;
705 continue;
706 }
707
708 cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
709 size);
710 video_timecode += kVideoBlockDuration;
711 video_flag = 0;
712 }
713
714 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
715 // block duration calculation logic.
716 if (audio_timecode <= video_timecode) {
717 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
718 kWebMFlagKeyframe, data.get(), size);
719 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
720 kVideoBlockDuration, video_flag);
721 } else {
722 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
723 kVideoBlockDuration, video_flag);
724 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
725 kWebMFlagKeyframe, data.get(), size);
726 }
727
728 return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
729 }
730
731 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
732 int end_timecode,
733 int track_number,
734 int block_duration) {
735 CHECK_GT(end_timecode, timecode);
736
737 std::vector<uint8> data(kBlockSize);
738
739 ClusterBuilder cb;
740 cb.SetClusterTimecode(timecode);
741
742 // Create simple blocks for everything except the last block.
743 while (timecode < (end_timecode - block_duration)) {
744 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
745 &data[0], data.size());
746 timecode += block_duration;
747 }
748
749 if (track_number == kVideoTrackNum) {
750 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
751 kWebMFlagKeyframe);
752 } else {
753 cb.AddBlockGroup(track_number, timecode, block_duration,
754 kWebMFlagKeyframe, &data[0], data.size());
755 }
756
757 return cb.Finish();
758 }
759
760 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
761 demuxer_->GetStream(type)->Read(read_cb);
762 message_loop_.RunUntilIdle();
763 }
764
765 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
766 Read(DemuxerStream::AUDIO, read_cb);
767 }
768
769 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
770 Read(DemuxerStream::VIDEO, read_cb);
771 }
772
773 void GenerateExpectedReads(int timecode, int block_count) {
774 GenerateExpectedReads(timecode, timecode, block_count);
775 }
776
777 void GenerateExpectedReads(int start_audio_timecode,
778 int start_video_timecode,
779 int block_count) {
780 CHECK_GT(block_count, 0);
781
782 if (block_count == 1) {
783 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
784 return;
785 }
786
787 int audio_timecode = start_audio_timecode;
788 int video_timecode = start_video_timecode;
789
790 for (int i = 0; i < block_count; i++) {
791 if (audio_timecode <= video_timecode) {
792 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
793 audio_timecode += kAudioBlockDuration;
794 continue;
795 }
796
797 ExpectRead(DemuxerStream::VIDEO, video_timecode);
798 video_timecode += kVideoBlockDuration;
799 }
800 }
801
802 void GenerateSingleStreamExpectedReads(int timecode,
803 int block_count,
804 DemuxerStream::Type type,
805 int block_duration) {
806 CHECK_GT(block_count, 0);
807 int stream_timecode = timecode;
808
809 for (int i = 0; i < block_count; i++) {
810 ExpectRead(type, stream_timecode);
811 stream_timecode += block_duration;
812 }
813 }
814
815 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
816 GenerateSingleStreamExpectedReads(
817 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
818 }
819
820 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
821 GenerateSingleStreamExpectedReads(
822 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
823 }
824
825 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
826 ClusterBuilder cb;
827 cb.SetClusterTimecode(timecode);
828 return cb.Finish();
829 }
830
831 void CheckExpectedRanges(const std::string& expected) {
832 CheckExpectedRanges(kSourceId, expected);
833 }
834
835 void CheckExpectedRanges(const std::string& id,
836 const std::string& expected) {
837 Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
838
839 std::stringstream ss;
840 ss << "{ ";
841 for (size_t i = 0; i < r.size(); ++i) {
842 ss << "[" << r.start(i).InMilliseconds() << ","
843 << r.end(i).InMilliseconds() << ") ";
844 }
845 ss << "}";
846 EXPECT_EQ(expected, ss.str());
847 }
848
849 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
850 const scoped_refptr<DecoderBuffer>&));
851
852 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
853 scoped_refptr<DecoderBuffer>* buffer_out,
854 DemuxerStream::Status status,
855 const scoped_refptr<DecoderBuffer>& buffer) {
856 *status_out = status;
857 *buffer_out = buffer;
858 }
859
860 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
861 DemuxerStream::Status* status,
862 base::TimeDelta* last_timestamp) {
863 DemuxerStream* stream = demuxer_->GetStream(type);
864 scoped_refptr<DecoderBuffer> buffer;
865
866 *last_timestamp = kNoTimestamp();
867 do {
868 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
869 base::Unretained(this), status, &buffer));
870 base::MessageLoop::current()->RunUntilIdle();
871 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
872 *last_timestamp = buffer->timestamp();
873 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
874 }
875
876 void ExpectEndOfStream(DemuxerStream::Type type) {
877 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
878 demuxer_->GetStream(type)->Read(base::Bind(
879 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
880 message_loop_.RunUntilIdle();
881 }
882
883 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
884 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
885 HasTimestamp(timestamp_in_ms)));
886 demuxer_->GetStream(type)->Read(base::Bind(
887 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
888 message_loop_.RunUntilIdle();
889 }
890
891 void ExpectConfigChanged(DemuxerStream::Type type) {
892 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
893 demuxer_->GetStream(type)->Read(base::Bind(
894 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
895 message_loop_.RunUntilIdle();
896 }
897
898 void CheckExpectedBuffers(DemuxerStream* stream,
899 const std::string& expected) {
900 std::vector<std::string> timestamps;
901 base::SplitString(expected, ' ', &timestamps);
902 std::stringstream ss;
903 for (size_t i = 0; i < timestamps.size(); ++i) {
904 // Initialize status to kAborted since it's possible for Read() to return
905 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
906 // left to return.
907 DemuxerStream::Status status = DemuxerStream::kAborted;
908 scoped_refptr<DecoderBuffer> buffer;
909 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
910 base::Unretained(this), &status, &buffer));
911 base::MessageLoop::current()->RunUntilIdle();
912 if (status != DemuxerStream::kOk || buffer->end_of_stream())
913 break;
914
915 if (i > 0)
916 ss << " ";
917 ss << buffer->timestamp().InMilliseconds();
918
919 // Handle preroll buffers.
920 if (EndsWith(timestamps[i], "P", true)) {
921 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
922 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
923 ss << "P";
924 }
925 }
926 EXPECT_EQ(expected, ss.str());
927 }
928
929 MOCK_METHOD1(Checkpoint, void(int id));
930
931 struct BufferTimestamps {
932 int video_time_ms;
933 int audio_time_ms;
934 };
935 static const int kSkip = -1;
936
937 // Test parsing a WebM file.
938 // |filename| - The name of the file in media/test/data to parse.
939 // |timestamps| - The expected timestamps on the parsed buffers.
940 // a timestamp of kSkip indicates that a Read() call for that stream
941 // shouldn't be made on that iteration of the loop. If both streams have
942 // a kSkip then the loop will terminate.
943 bool ParseWebMFile(const std::string& filename,
944 const BufferTimestamps* timestamps,
945 const base::TimeDelta& duration) {
946 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
947 }
948
949 bool ParseWebMFile(const std::string& filename,
950 const BufferTimestamps* timestamps,
951 const base::TimeDelta& duration,
952 int stream_flags) {
953 EXPECT_CALL(*this, DemuxerOpened());
954 demuxer_->Initialize(
955 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
956
957 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
958 return false;
959
960 // Read a WebM file into memory and send the data to the demuxer.
961 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
962 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
963
964 // Verify that the timestamps on the first few packets match what we
965 // expect.
966 for (size_t i = 0;
967 (timestamps[i].audio_time_ms != kSkip ||
968 timestamps[i].video_time_ms != kSkip);
969 i++) {
970 bool audio_read_done = false;
971 bool video_read_done = false;
972
973 if (timestamps[i].audio_time_ms != kSkip) {
974 ReadAudio(base::Bind(&OnReadDone,
975 base::TimeDelta::FromMilliseconds(
976 timestamps[i].audio_time_ms),
977 &audio_read_done));
978 EXPECT_TRUE(audio_read_done);
979 }
980
981 if (timestamps[i].video_time_ms != kSkip) {
982 ReadVideo(base::Bind(&OnReadDone,
983 base::TimeDelta::FromMilliseconds(
984 timestamps[i].video_time_ms),
985 &video_read_done));
986 EXPECT_TRUE(video_read_done);
987 }
988 }
989
990 return true;
991 }
992
993 MOCK_METHOD0(DemuxerOpened, void());
994 // TODO(xhwang): This is a workaround of the issue that move-only parameters
995 // are not supported in mocked methods. Remove this when the issue is fixed
996 // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
997 // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
998 MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
999 const uint8* init_data, int init_data_size));
1000 void DemuxerNeedKey(const std::string& type,
1001 const std::vector<uint8>& init_data) {
1002 const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
1003 NeedKeyMock(type, init_data_ptr, init_data.size());
1004 }
1005
1006 void Seek(base::TimeDelta seek_time) {
1007 demuxer_->StartWaitingForSeek(seek_time);
1008 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1009 message_loop_.RunUntilIdle();
1010 }
1011
1012 void MarkEndOfStream(PipelineStatus status) {
1013 demuxer_->MarkEndOfStream(status);
1014 message_loop_.RunUntilIdle();
1015 }
1016
1017 bool SetTimestampOffset(const std::string& id,
1018 base::TimeDelta timestamp_offset) {
1019 if (demuxer_->IsParsingMediaSegment(id))
1020 return false;
1021
1022 timestamp_offset_map_[id] = timestamp_offset;
1023 return true;
1024 }
1025
1026 base::MessageLoop message_loop_;
1027 MockDemuxerHost host_;
1028
1029 scoped_ptr<ChunkDemuxer> demuxer_;
1030
1031 base::TimeDelta append_window_start_for_next_append_;
1032 base::TimeDelta append_window_end_for_next_append_;
1033
1034 // Map of source id to timestamp offset to use for the next AppendData()
1035 // operation for that source id.
1036 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1037
1038 private:
1039 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1040 };
1041
1042 TEST_F(ChunkDemuxerTest, Init) {
1043 // Test no streams, audio-only, video-only, and audio & video scenarios.
1044 // Audio and video streams can be encrypted or not encrypted.
1045 for (int i = 0; i < 16; i++) {
1046 bool has_audio = (i & 0x1) != 0;
1047 bool has_video = (i & 0x2) != 0;
1048 bool is_audio_encrypted = (i & 0x4) != 0;
1049 bool is_video_encrypted = (i & 0x8) != 0;
1050
1051 // Skip invalid combinations.
1052 if ((!has_audio && is_audio_encrypted) ||
1053 (!has_video && is_video_encrypted)) {
1054 continue;
1055 }
1056
1057 CreateNewDemuxer();
1058
1059 if (is_audio_encrypted || is_video_encrypted) {
1060 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1061 (is_video_encrypted ? 1 : 0);
1062 EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1063 DecryptConfig::kDecryptionKeySize))
1064 .Times(Exactly(need_key_count));
1065 }
1066
1067 int stream_flags = 0;
1068 if (has_audio)
1069 stream_flags |= HAS_AUDIO;
1070
1071 if (has_video)
1072 stream_flags |= HAS_VIDEO;
1073
1074 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1075 stream_flags, is_audio_encrypted, is_video_encrypted));
1076
1077 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1078 if (has_audio) {
1079 ASSERT_TRUE(audio_stream);
1080
1081 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1082 EXPECT_EQ(kCodecVorbis, config.codec());
1083 EXPECT_EQ(32, config.bits_per_channel());
1084 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1085 EXPECT_EQ(44100, config.samples_per_second());
1086 EXPECT_TRUE(config.extra_data());
1087 EXPECT_GT(config.extra_data_size(), 0u);
1088 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1089 EXPECT_EQ(is_audio_encrypted,
1090 audio_stream->audio_decoder_config().is_encrypted());
1091 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1092 ->supports_partial_append_window_trimming());
1093 } else {
1094 EXPECT_FALSE(audio_stream);
1095 }
1096
1097 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1098 if (has_video) {
1099 EXPECT_TRUE(video_stream);
1100 EXPECT_EQ(is_video_encrypted,
1101 video_stream->video_decoder_config().is_encrypted());
1102 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1103 ->supports_partial_append_window_trimming());
1104 } else {
1105 EXPECT_FALSE(video_stream);
1106 }
1107
1108 ShutdownDemuxer();
1109 demuxer_.reset();
1110 }
1111 }
1112
1113 // TODO(acolwell): Fold this test into Init tests since the tests are
1114 // almost identical.
1115 TEST_F(ChunkDemuxerTest, InitText) {
1116 // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1117 // No encryption cases handled here.
1118 bool has_video = true;
1119 bool is_audio_encrypted = false;
1120 bool is_video_encrypted = false;
1121 for (int i = 0; i < 2; i++) {
1122 bool has_audio = (i & 0x1) != 0;
1123
1124 CreateNewDemuxer();
1125
1126 DemuxerStream* text_stream = NULL;
1127 TextTrackConfig text_config;
1128 EXPECT_CALL(host_, AddTextStream(_, _))
1129 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1130 SaveArg<1>(&text_config)));
1131
1132 int stream_flags = HAS_TEXT;
1133 if (has_audio)
1134 stream_flags |= HAS_AUDIO;
1135
1136 if (has_video)
1137 stream_flags |= HAS_VIDEO;
1138
1139 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1140 stream_flags, is_audio_encrypted, is_video_encrypted));
1141 ASSERT_TRUE(text_stream);
1142 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1143 EXPECT_EQ(kTextSubtitles, text_config.kind());
1144 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1145 ->supports_partial_append_window_trimming());
1146
1147 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1148 if (has_audio) {
1149 ASSERT_TRUE(audio_stream);
1150
1151 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1152 EXPECT_EQ(kCodecVorbis, config.codec());
1153 EXPECT_EQ(32, config.bits_per_channel());
1154 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1155 EXPECT_EQ(44100, config.samples_per_second());
1156 EXPECT_TRUE(config.extra_data());
1157 EXPECT_GT(config.extra_data_size(), 0u);
1158 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1159 EXPECT_EQ(is_audio_encrypted,
1160 audio_stream->audio_decoder_config().is_encrypted());
1161 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1162 ->supports_partial_append_window_trimming());
1163 } else {
1164 EXPECT_FALSE(audio_stream);
1165 }
1166
1167 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1168 if (has_video) {
1169 EXPECT_TRUE(video_stream);
1170 EXPECT_EQ(is_video_encrypted,
1171 video_stream->video_decoder_config().is_encrypted());
1172 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1173 ->supports_partial_append_window_trimming());
1174 } else {
1175 EXPECT_FALSE(video_stream);
1176 }
1177
1178 ShutdownDemuxer();
1179 demuxer_.reset();
1180 }
1181 }
1182
1183 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1184 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1185 // segment in which the text track ID changes. Verify appended buffers before
1186 // and after the second init segment map to the same underlying track buffers.
1187 CreateNewDemuxer();
1188 DemuxerStream* text_stream = NULL;
1189 TextTrackConfig text_config;
1190 EXPECT_CALL(host_, AddTextStream(_, _))
1191 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1192 SaveArg<1>(&text_config)));
1193 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1194 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1195 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1196 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1197 ASSERT_TRUE(audio_stream);
1198 ASSERT_TRUE(video_stream);
1199 ASSERT_TRUE(text_stream);
1200
1201 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1202 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
1203 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "10K");
1204 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1205
1206 scoped_ptr<uint8[]> info_tracks;
1207 int info_tracks_size = 0;
1208 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1209 false, false,
1210 &info_tracks, &info_tracks_size);
1211 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1212 append_window_start_for_next_append_,
1213 append_window_end_for_next_append_,
1214 &timestamp_offset_map_[kSourceId]);
1215
1216 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46K 69K");
1217 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60K");
1218 AppendSingleStreamCluster(kSourceId, kAlternateTextTrackNum, "45K");
1219
1220 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1221 CheckExpectedBuffers(audio_stream, "0 23 46 69");
1222 CheckExpectedBuffers(video_stream, "0 30 60");
1223 CheckExpectedBuffers(text_stream, "10 45");
1224
1225 ShutdownDemuxer();
1226 }
1227
1228 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1229 // Tests that non-keyframes following an init segment are allowed
1230 // and dropped, as expected if the initialization segment received
1231 // algorithm correctly sets the needs random access point flag to true for all
1232 // track buffers. Note that the first initialization segment is insufficient
1233 // to fully test this since needs random access point flag initializes to
1234 // true.
1235 CreateNewDemuxer();
1236 DemuxerStream* text_stream = NULL;
1237 EXPECT_CALL(host_, AddTextStream(_, _))
1238 .WillOnce(SaveArg<0>(&text_stream));
1239 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1240 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1241 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1242 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1243 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1244
1245 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0 23K");
1246 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0 30K");
1247 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0 40K");
1248 CheckExpectedRanges(kSourceId, "{ [30,46) }");
1249
1250 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1251 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46 69K");
1252 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60 90K");
1253 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "80 90K");
1254 CheckExpectedRanges(kSourceId, "{ [30,92) }");
1255
1256 CheckExpectedBuffers(audio_stream, "23 69");
1257 CheckExpectedBuffers(video_stream, "30 90");
1258
1259 // WebM parser marks all text buffers as keyframes.
1260 CheckExpectedBuffers(text_stream, "0 40 80 90");
1261 }
1262
1263 // Make sure that the demuxer reports an error if Shutdown()
1264 // is called before all the initialization segments are appended.
1265 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1266 EXPECT_CALL(*this, DemuxerOpened());
1267 demuxer_->Initialize(
1268 &host_, CreateInitDoneCB(
1269 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1270
1271 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1272 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1273
1274 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1275
1276 ShutdownDemuxer();
1277 }
1278
1279 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1280 EXPECT_CALL(*this, DemuxerOpened());
1281 demuxer_->Initialize(
1282 &host_, CreateInitDoneCB(
1283 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1284
1285 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1286 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1287
1288 EXPECT_CALL(host_, AddTextStream(_, _))
1289 .Times(Exactly(1));
1290
1291 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1292
1293 ShutdownDemuxer();
1294 }
1295
1296 // Verifies that all streams waiting for data receive an end of stream
1297 // buffer when Shutdown() is called.
1298 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1299 DemuxerStream* text_stream = NULL;
1300 EXPECT_CALL(host_, AddTextStream(_, _))
1301 .WillOnce(SaveArg<0>(&text_stream));
1302 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1303
1304 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1305 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1306
1307 bool audio_read_done = false;
1308 bool video_read_done = false;
1309 bool text_read_done = false;
1310 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1311 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1312 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1313 message_loop_.RunUntilIdle();
1314
1315 EXPECT_FALSE(audio_read_done);
1316 EXPECT_FALSE(video_read_done);
1317 EXPECT_FALSE(text_read_done);
1318
1319 ShutdownDemuxer();
1320
1321 EXPECT_TRUE(audio_read_done);
1322 EXPECT_TRUE(video_read_done);
1323 EXPECT_TRUE(text_read_done);
1324 }
1325
1326 // Test that Seek() completes successfully when the first cluster
1327 // arrives.
1328 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1329 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1330 AppendCluster(kDefaultFirstCluster());
1331
1332 InSequence s;
1333
1334 EXPECT_CALL(*this, Checkpoint(1));
1335
1336 Seek(base::TimeDelta::FromMilliseconds(46));
1337
1338 EXPECT_CALL(*this, Checkpoint(2));
1339
1340 Checkpoint(1);
1341
1342 AppendCluster(kDefaultSecondCluster());
1343
1344 message_loop_.RunUntilIdle();
1345
1346 Checkpoint(2);
1347 }
1348
1349 // Test that parsing errors are handled for clusters appended after init.
1350 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1351 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1352 AppendCluster(kDefaultFirstCluster());
1353
1354 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1355 AppendGarbage();
1356 }
1357
1358 // Test the case where a Seek() is requested while the parser
1359 // is in the middle of a cluster. This is to verify that the parser
1360 // does not reset itself on a seek.
1361 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1362 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1363
1364 InSequence s;
1365
1366 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1367
1368 // Split the cluster into two appends at an arbitrary point near the end.
1369 int first_append_size = cluster_a->size() - 11;
1370 int second_append_size = cluster_a->size() - first_append_size;
1371
1372 // Append the first part of the cluster.
1373 AppendData(cluster_a->data(), first_append_size);
1374
1375 ExpectRead(DemuxerStream::AUDIO, 0);
1376 ExpectRead(DemuxerStream::VIDEO, 0);
1377 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1378
1379 Seek(base::TimeDelta::FromSeconds(5));
1380
1381 // Append the rest of the cluster.
1382 AppendData(cluster_a->data() + first_append_size, second_append_size);
1383
1384 // Append the new cluster and verify that only the blocks
1385 // in the new cluster are returned.
1386 AppendCluster(GenerateCluster(5000, 6));
1387 GenerateExpectedReads(5000, 6);
1388 }
1389
1390 // Test the case where AppendData() is called before Init().
1391 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1392 scoped_ptr<uint8[]> info_tracks;
1393 int info_tracks_size = 0;
1394 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1395 false, false, &info_tracks, &info_tracks_size);
1396 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1397 append_window_start_for_next_append_,
1398 append_window_end_for_next_append_,
1399 &timestamp_offset_map_[kSourceId]);
1400 }
1401
1402 // Make sure Read() callbacks are dispatched with the proper data.
1403 TEST_F(ChunkDemuxerTest, Read) {
1404 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1405
1406 AppendCluster(kDefaultFirstCluster());
1407
1408 bool audio_read_done = false;
1409 bool video_read_done = false;
1410 ReadAudio(base::Bind(&OnReadDone,
1411 base::TimeDelta::FromMilliseconds(0),
1412 &audio_read_done));
1413 ReadVideo(base::Bind(&OnReadDone,
1414 base::TimeDelta::FromMilliseconds(0),
1415 &video_read_done));
1416
1417 EXPECT_TRUE(audio_read_done);
1418 EXPECT_TRUE(video_read_done);
1419 }
1420
1421 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1422 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1423 AppendCluster(kDefaultFirstCluster());
1424 AppendCluster(GenerateCluster(10, 4));
1425
1426 // Make sure that AppendCluster() does not fail with a cluster that has
1427 // overlaps with the previously appended cluster.
1428 AppendCluster(GenerateCluster(5, 4));
1429
1430 // Verify that AppendData() can still accept more data.
1431 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1432 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1433 append_window_start_for_next_append_,
1434 append_window_end_for_next_append_,
1435 &timestamp_offset_map_[kSourceId]);
1436 }
1437
1438 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1439 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1440 AppendCluster(kDefaultFirstCluster());
1441
1442 ClusterBuilder cb;
1443
1444 // Test the case where block timecodes are not monotonically
1445 // increasing but stay above the cluster timecode.
1446 cb.SetClusterTimecode(5);
1447 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1448 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1449 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1450 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1451
1452 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1453 AppendCluster(cb.Finish());
1454
1455 // Verify that AppendData() ignores data after the error.
1456 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1457 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1458 append_window_start_for_next_append_,
1459 append_window_end_for_next_append_,
1460 &timestamp_offset_map_[kSourceId]);
1461 }
1462
1463 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1464 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1465 AppendCluster(kDefaultFirstCluster());
1466
1467 ClusterBuilder cb;
1468
1469 // Test timecodes going backwards and including values less than the cluster
1470 // timecode.
1471 cb.SetClusterTimecode(5);
1472 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1473 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1474 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1475 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1476
1477 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1478 AppendCluster(cb.Finish());
1479
1480 // Verify that AppendData() ignores data after the error.
1481 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1482 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1483 append_window_start_for_next_append_,
1484 append_window_end_for_next_append_,
1485 &timestamp_offset_map_[kSourceId]);
1486 }
1487
1488
1489 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1490 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1491 AppendCluster(kDefaultFirstCluster());
1492
1493 ClusterBuilder cb;
1494
1495 // Test monotonically increasing timestamps on a per stream
1496 // basis.
1497 cb.SetClusterTimecode(5);
1498 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1499 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1500 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1501 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1502
1503 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1504 AppendCluster(cb.Finish());
1505 }
1506
1507 // Test the case where a cluster is passed to AppendCluster() before
1508 // INFO & TRACKS data.
1509 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1510 EXPECT_CALL(*this, DemuxerOpened());
1511 demuxer_->Initialize(
1512 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1513
1514 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1515
1516 AppendCluster(GenerateCluster(0, 1));
1517 }
1518
1519 // Test cases where we get a MarkEndOfStream() call during initialization.
1520 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1521 EXPECT_CALL(*this, DemuxerOpened());
1522 demuxer_->Initialize(
1523 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1524 MarkEndOfStream(PIPELINE_OK);
1525 }
1526
1527 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1528 EXPECT_CALL(*this, DemuxerOpened());
1529 demuxer_->Initialize(
1530 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1531
1532 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1533
1534 CheckExpectedRanges("{ }");
1535 MarkEndOfStream(PIPELINE_OK);
1536 ShutdownDemuxer();
1537 CheckExpectedRanges("{ }");
1538 demuxer_->RemoveId(kSourceId);
1539 demuxer_.reset();
1540 }
1541
1542 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1543 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1544
1545 CheckExpectedRanges("{ }");
1546 MarkEndOfStream(PIPELINE_OK);
1547 CheckExpectedRanges("{ }");
1548 }
1549
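// Verify that a decode error passed to MarkEndOfStream() is reported to the
// host and that the buffered ranges are unaffected.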
1550 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1551 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1552
1553 AppendCluster(kDefaultFirstCluster());
1554 CheckExpectedRanges(kDefaultFirstClusterRange);
1555
1556 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1557 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1558 CheckExpectedRanges(kDefaultFirstClusterRange);
1559 }
1560
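// Verify that a network error passed to MarkEndOfStream() is reported to the
// host via OnDemuxerError().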
1561 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1562 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1563
1564 AppendCluster(kDefaultFirstCluster());
1565 CheckExpectedRanges(kDefaultFirstClusterRange);
1566
1567 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1568 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1569 }
1570
1571 // Helper class to reduce duplicate code when testing end of stream
1572 // Read() behavior.
1573 class EndOfStreamHelper {
1574 public:
1575 explicit EndOfStreamHelper(Demuxer* demuxer)
1576 : demuxer_(demuxer),
1577 audio_read_done_(false),
1578 video_read_done_(false) {
1579 }
1580
1581 // Request a read on the audio and video streams.
1582 void RequestReads() {
1583 EXPECT_FALSE(audio_read_done_);
1584 EXPECT_FALSE(video_read_done_);
1585
1586 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1587 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1588
1589 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1590 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1591 base::MessageLoop::current()->RunUntilIdle();
1592 }
1593
1594 // Check to see if |audio_read_done_| and |video_read_done_| variables
1595 // match |expected|.
1596 void CheckIfReadDonesWereCalled(bool expected) {
1597 base::MessageLoop::current()->RunUntilIdle();
1598 EXPECT_EQ(expected, audio_read_done_);
1599 EXPECT_EQ(expected, video_read_done_);
1600 }
1601
1602 private:
1603 static void OnEndOfStreamReadDone(
1604 bool* called,
1605 DemuxerStream::Status status,
1606 const scoped_refptr<DecoderBuffer>& buffer) {
1607 EXPECT_EQ(status, DemuxerStream::kOk);
1608 EXPECT_TRUE(buffer->end_of_stream());
1609 *called = true;
1610 }
1611
1612 Demuxer* demuxer_;
1613 bool audio_read_done_;
1614 bool video_read_done_;
1615
1616 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1617 };
1618
1619 // Make sure that all pending reads that we don't have media data for get an
1620 // "end of stream" buffer when MarkEndOfStream() is called.
1621 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1622 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1623
1624 AppendCluster(GenerateCluster(0, 2));
1625
1626 bool audio_read_done_1 = false;
1627 bool video_read_done_1 = false;
1628 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1629 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1630
1631 ReadAudio(base::Bind(&OnReadDone,
1632 base::TimeDelta::FromMilliseconds(0),
1633 &audio_read_done_1));
1634 ReadVideo(base::Bind(&OnReadDone,
1635 base::TimeDelta::FromMilliseconds(0),
1636 &video_read_done_1));
1637 message_loop_.RunUntilIdle();
1638
1639 EXPECT_TRUE(audio_read_done_1);
1640 EXPECT_TRUE(video_read_done_1);
1641
1642 end_of_stream_helper_1.RequestReads();
1643
1644 EXPECT_CALL(host_, SetDuration(
1645 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1646 MarkEndOfStream(PIPELINE_OK);
1647
1648 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1649
1650 end_of_stream_helper_2.RequestReads();
1651 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1652 }
1653
1654 // Make sure that all Read() calls after we get a MarkEndOfStream()
1655 // call return an "end of stream" buffer.
1656 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1657 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1658
1659 AppendCluster(GenerateCluster(0, 2));
1660
1661 bool audio_read_done_1 = false;
1662 bool video_read_done_1 = false;
1663 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1664 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1665 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1666
1667 ReadAudio(base::Bind(&OnReadDone,
1668 base::TimeDelta::FromMilliseconds(0),
1669 &audio_read_done_1));
1670 ReadVideo(base::Bind(&OnReadDone,
1671 base::TimeDelta::FromMilliseconds(0),
1672 &video_read_done_1));
1673
1674 end_of_stream_helper_1.RequestReads();
1675
1676 EXPECT_TRUE(audio_read_done_1);
1677 EXPECT_TRUE(video_read_done_1);
1678 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1679
1680 EXPECT_CALL(host_, SetDuration(
1681 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1682 MarkEndOfStream(PIPELINE_OK);
1683
1684 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1685
1686 // Request a few more reads and make sure we immediately get
1687 // end of stream buffers.
1688 end_of_stream_helper_2.RequestReads();
1689 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1690
1691 end_of_stream_helper_3.RequestReads();
1692 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1693 }
1694
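// Verify that both streams can still be read through to end of stream after a
// pending seek is canceled and replaced by a second seek.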
1695 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1696 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1697
1698 AppendCluster(0, 10);
1699 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1700 MarkEndOfStream(PIPELINE_OK);
1701
1702 // Start the first seek.
1703 Seek(base::TimeDelta::FromMilliseconds(20));
1704
1705 // Simulate another seek being requested before the first
1706 // seek has finished prerolling.
1707 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1708 demuxer_->CancelPendingSeek(seek_time2);
1709
1710 // Finish second seek.
1711 Seek(seek_time2);
1712
1713 DemuxerStream::Status status;
1714 base::TimeDelta last_timestamp;
1715
1716 // Make sure audio can reach end of stream.
1717 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1718 ASSERT_EQ(status, DemuxerStream::kOk);
1719
1720 // Make sure video can reach end of stream.
1721 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1722 ASSERT_EQ(status, DemuxerStream::kOk);
1723 }
1724
1725 // Verify buffered range change behavior for audio/video/text tracks.
1726 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1727 DemuxerStream* text_stream = NULL;
1728
1729 EXPECT_CALL(host_, AddTextStream(_, _))
1730 .WillOnce(SaveArg<0>(&text_stream));
1731 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1732
1733 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
1734 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1735
1736 // Check expected ranges and verify that an empty text track does not
1737 // affect the expected ranges.
1738 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1739
1740 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1741 MarkEndOfStream(PIPELINE_OK);
1742
1743 // Check expected ranges and verify that an empty text track does not
1744 // affect the expected ranges.
1745 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1746
1747 // Unmark end of stream state and verify that the ranges return to
1748 // their pre-"end of stream" values.
1749 demuxer_->UnmarkEndOfStream();
1750 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1751
1752 // Add text track data and verify that the buffered ranges don't change
1753 // since the intersection of all the tracks doesn't change.
1754 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1755 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
1756 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1757
1758 // Mark end of stream and verify that text track data is reflected in
1759 // the new range.
1760 MarkEndOfStream(PIPELINE_OK);
1761 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1762 }
1763
1764 // Make sure AppendData() will accept elements that span multiple calls.
1765 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1766 EXPECT_CALL(*this, DemuxerOpened());
1767 demuxer_->Initialize(
1768 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1769
1770 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1771
1772 scoped_ptr<uint8[]> info_tracks;
1773 int info_tracks_size = 0;
1774 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1775 false, false, &info_tracks, &info_tracks_size);
1776
1777 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1778 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1779
1780 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1781 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1782 uint8* dst = buffer.get();
1783 memcpy(dst, info_tracks.get(), info_tracks_size);
1784 dst += info_tracks_size;
1785
1786 memcpy(dst, cluster_a->data(), cluster_a->size());
1787 dst += cluster_a->size();
1788
1789 memcpy(dst, cluster_b->data(), cluster_b->size());
1790 dst += cluster_b->size();
1791
1792 AppendDataInPieces(buffer.get(), buffer_size);
1793
1794 GenerateExpectedReads(0, 9);
1795 }
1796
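// Verify that a complete WebM file containing audio and video tracks parses
// correctly and yields the expected buffer timestamps.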
1797 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1798 struct BufferTimestamps buffer_timestamps[] = {
1799 {0, 0},
1800 {33, 3},
1801 {67, 6},
1802 {100, 9},
1803 {133, 12},
1804 {kSkip, kSkip},
1805 };
1806
1807 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1808 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1809 // have the correct duration in the init segment. See http://crbug.com/354284.
1810 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1811
1812 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1813 base::TimeDelta::FromMilliseconds(2744)));
1814 }
1815
1816 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1817 struct BufferTimestamps buffer_timestamps[] = {
1818 {0, 0},
1819 {33, 3},
1820 {67, 6},
1821 {100, 9},
1822 {133, 12},
1823 {kSkip, kSkip},
1824 };
1825
1826 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1827 kInfiniteDuration()));
1828 }
1829
1830 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
1831 struct BufferTimestamps buffer_timestamps[] = {
1832 {kSkip, 0},
1833 {kSkip, 3},
1834 {kSkip, 6},
1835 {kSkip, 9},
1836 {kSkip, 12},
1837 {kSkip, kSkip},
1838 };
1839
1840 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1841 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1842 // have the correct duration in the init segment. See http://crbug.com/354284.
1843 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1844
1845 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1846 base::TimeDelta::FromMilliseconds(2744),
1847 HAS_AUDIO));
1848 }
1849
1850 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
1851 struct BufferTimestamps buffer_timestamps[] = {
1852 {0, kSkip},
1853 {33, kSkip},
1854 {67, kSkip},
1855 {100, kSkip},
1856 {133, kSkip},
1857 {kSkip, kSkip},
1858 };
1859
1860 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1861 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1862 // have the correct duration in the init segment. See http://crbug.com/354284.
1863 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1864
1865 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
1866 base::TimeDelta::FromMilliseconds(2703),
1867 HAS_VIDEO));
1868 }
1869
1870 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
1871 struct BufferTimestamps buffer_timestamps[] = {
1872 {0, 0},
1873 {33, 3},
1874 {33, 6},
1875 {67, 9},
1876 {100, 12},
1877 {kSkip, kSkip},
1878 };
1879
1880 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
1881 base::TimeDelta::FromMilliseconds(2767)));
1882 }
1883
1884 // Verify that we output buffers before the entire cluster has been parsed.
1885 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
1886 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1887 AppendEmptyCluster(0);
1888
1889 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
1890
1891 bool audio_read_done = false;
1892 bool video_read_done = false;
1893 ReadAudio(base::Bind(&OnReadDone,
1894 base::TimeDelta::FromMilliseconds(0),
1895 &audio_read_done));
1896 ReadVideo(base::Bind(&OnReadDone,
1897 base::TimeDelta::FromMilliseconds(0),
1898 &video_read_done));
1899
1900 // Make sure the reads haven't completed yet.
1901 EXPECT_FALSE(audio_read_done);
1902 EXPECT_FALSE(video_read_done);
1903
1904 // Append data one byte at a time until one or both reads complete.
1905 int i = 0;
1906 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
1907 AppendData(cluster->data() + i, 1);
1908 message_loop_.RunUntilIdle();
1909 }
1910
1911 EXPECT_TRUE(audio_read_done || video_read_done);
1912 EXPECT_GT(i, 0);
1913 EXPECT_LT(i, cluster->size());
1914
1915 audio_read_done = false;
1916 video_read_done = false;
1917 ReadAudio(base::Bind(&OnReadDone,
1918 base::TimeDelta::FromMilliseconds(23),
1919 &audio_read_done));
1920 ReadVideo(base::Bind(&OnReadDone,
1921 base::TimeDelta::FromMilliseconds(33),
1922 &video_read_done));
1923
1924 // Make sure the reads haven't completed yet.
1925 EXPECT_FALSE(audio_read_done);
1926 EXPECT_FALSE(video_read_done);
1927
1928 // Append the remaining data.
1929 ASSERT_LT(i, cluster->size());
1930 AppendData(cluster->data() + i, cluster->size() - i);
1931
1932 message_loop_.RunUntilIdle();
1933
1934 EXPECT_TRUE(audio_read_done);
1935 EXPECT_TRUE(video_read_done);
1936 }
1937
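// Verify that appending unparsable data before a valid init segment causes
// initialization to fail with DEMUXER_ERROR_COULD_NOT_OPEN.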
1938 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
1939 EXPECT_CALL(*this, DemuxerOpened());
1940 demuxer_->Initialize(
1941 &host_, CreateInitDoneCB(
1942 kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1943
1944 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1945
1946 uint8 tmp = 0;
1947 demuxer_->AppendData(kSourceId, &tmp, 1,
1948 append_window_start_for_next_append_,
1949 append_window_end_for_next_append_,
1950 &timestamp_offset_map_[kSourceId]);
1951 }
1952
1953 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
1954 EXPECT_CALL(*this, DemuxerOpened());
1955 demuxer_->Initialize(
1956 &host_, CreateInitDoneCB(kNoTimestamp(),
1957 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1958
1959 std::vector<std::string> codecs(1);
1960 codecs[0] = "vorbis";
1961 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
1962 ChunkDemuxer::kOk);
1963
1964 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1965 }
1966
1967 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
1968 EXPECT_CALL(*this, DemuxerOpened());
1969 demuxer_->Initialize(
1970 &host_, CreateInitDoneCB(kNoTimestamp(),
1971 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1972
1973 std::vector<std::string> codecs(1);
1974 codecs[0] = "vp8";
1975 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
1976 ChunkDemuxer::kOk);
1977
1978 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1979 }
1980
1981 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
1982 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1983
1984 AppendCluster(kDefaultFirstCluster());
1985
1986 // Append another identical initialization segment.
1987 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1988
1989 AppendCluster(kDefaultSecondCluster());
1990
1991 GenerateExpectedReads(0, 9);
1992 }
1993
1994 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
1995 std::string audio_id = "audio1";
1996 std::string video_id = "video1";
1997 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1998
1999 // Append audio and video data into separate source ids.
2000 AppendCluster(audio_id,
2001 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2002 GenerateAudioStreamExpectedReads(0, 4);
2003 AppendCluster(video_id,
2004 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2005 GenerateVideoStreamExpectedReads(0, 4);
2006 }
2007
2008 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2009 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2010 // for inband text tracks (http://crbug/321455).
2011
2012 std::string audio_id = "audio1";
2013 std::string video_id = "video1";
2014
2015 EXPECT_CALL(host_, AddTextStream(_, _))
2016 .Times(Exactly(2));
2017 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2018
2019 // Append audio and video data into separate source ids.
2020 AppendCluster(audio_id,
2021 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2022 GenerateAudioStreamExpectedReads(0, 4);
2023 AppendCluster(video_id,
2024 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2025 GenerateVideoStreamExpectedReads(0, 4);
2026 }
2027
2028 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2029 EXPECT_CALL(*this, DemuxerOpened());
2030 demuxer_->Initialize(
2031 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2032
2033 std::string audio_id = "audio1";
2034 std::string video_id = "video1";
2035
2036 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2037
2038 // Adding an id with audio/video should fail because we already added audio.
2039 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2040
2041 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2042
2043 // Adding an id after append should fail.
2044 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2045 }
2046
2047 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2048 TEST_F(ChunkDemuxerTest, RemoveId) {
2049 std::string audio_id = "audio1";
2050 std::string video_id = "video1";
2051 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2052
2053 // Append audio and video data into separate source ids.
2054 AppendCluster(audio_id,
2055 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2056 AppendCluster(video_id,
2057 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2058
2059 // Read() from audio should return normal buffers.
2060 GenerateAudioStreamExpectedReads(0, 4);
2061
2062 // Remove the audio id.
2063 demuxer_->RemoveId(audio_id);
2064
2065 // Read() from audio should return "end of stream" buffers.
2066 bool audio_read_done = false;
2067 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2068 message_loop_.RunUntilIdle();
2069 EXPECT_TRUE(audio_read_done);
2070
2071 // Read() from video should still return normal buffers.
2072 GenerateVideoStreamExpectedReads(0, 4);
2073 }
2074
2075 // Test that removing an ID immediately after adding it does not interfere with
2076 // quota for new IDs in the future.
2077 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2078 std::string audio_id_1 = "audio1";
2079 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2080 demuxer_->RemoveId(audio_id_1);
2081
2082 std::string audio_id_2 = "audio2";
2083 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2084 }
2085
2086 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2087 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2088
2089 // Append cluster at the beginning of the stream.
2090 AppendCluster(GenerateCluster(0, 4));
2091
2092 // Seek to an unbuffered region.
2093 Seek(base::TimeDelta::FromSeconds(50));
2094
2095 // Attempt to read in unbuffered area; should not fulfill the read.
2096 bool audio_read_done = false;
2097 bool video_read_done = false;
2098 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2099 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2100 EXPECT_FALSE(audio_read_done);
2101 EXPECT_FALSE(video_read_done);
2102
2103 // Now cancel the pending seek, which should flush the reads with empty
2104 // buffers.
2105 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2106 demuxer_->CancelPendingSeek(seek_time);
2107 message_loop_.RunUntilIdle();
2108 EXPECT_TRUE(audio_read_done);
2109 EXPECT_TRUE(video_read_done);
2110
2111 // A seek back to the buffered region should succeed.
2112 Seek(seek_time);
2113 GenerateExpectedReads(0, 4);
2114 }
2115
2116 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2117 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2118
2119 // Append cluster at the beginning of the stream.
2120 AppendCluster(GenerateCluster(0, 4));
2121
2122 // Start waiting for a seek.
2123 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2124 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2125 demuxer_->StartWaitingForSeek(seek_time1);
2126
2127 // Now cancel the upcoming seek to an unbuffered region.
2128 demuxer_->CancelPendingSeek(seek_time2);
2129 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2130
2131 // Read requests should be fulfilled with empty buffers.
2132 bool audio_read_done = false;
2133 bool video_read_done = false;
2134 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2135 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2136 EXPECT_TRUE(audio_read_done);
2137 EXPECT_TRUE(video_read_done);
2138
2139 // A seek back to the buffered region should succeed.
2140 Seek(seek_time2);
2141 GenerateExpectedReads(0, 4);
2142 }
2143
2144 // Test that Seek() successfully seeks to all source IDs.
2145 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2146 std::string audio_id = "audio1";
2147 std::string video_id = "video1";
2148 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2149
2150 AppendCluster(
2151 audio_id,
2152 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2153 AppendCluster(
2154 video_id,
2155 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2156
2157 // Read() should return buffers at 0.
2158 bool audio_read_done = false;
2159 bool video_read_done = false;
2160 ReadAudio(base::Bind(&OnReadDone,
2161 base::TimeDelta::FromMilliseconds(0),
2162 &audio_read_done));
2163 ReadVideo(base::Bind(&OnReadDone,
2164 base::TimeDelta::FromMilliseconds(0),
2165 &video_read_done));
2166 EXPECT_TRUE(audio_read_done);
2167 EXPECT_TRUE(video_read_done);
2168
2169 // Seek to 3 (an unbuffered region).
2170 Seek(base::TimeDelta::FromSeconds(3));
2171
2172 audio_read_done = false;
2173 video_read_done = false;
2174 ReadAudio(base::Bind(&OnReadDone,
2175 base::TimeDelta::FromSeconds(3),
2176 &audio_read_done));
2177 ReadVideo(base::Bind(&OnReadDone,
2178 base::TimeDelta::FromSeconds(3),
2179 &video_read_done));
2180 // Read()s should not return until after data is appended at the Seek point.
2181 EXPECT_FALSE(audio_read_done);
2182 EXPECT_FALSE(video_read_done);
2183
2184 AppendCluster(audio_id,
2185 GenerateSingleStreamCluster(
2186 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2187 AppendCluster(video_id,
2188 GenerateSingleStreamCluster(
2189 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2190
2191 message_loop_.RunUntilIdle();
2192
2193 // Read() should return buffers at 3.
2194 EXPECT_TRUE(audio_read_done);
2195 EXPECT_TRUE(video_read_done);
2196 }
2197
2198 // Test that Seek() completes successfully when EndOfStream
2199 // is called before data is available for that seek point.
2200 // This scenario can occur when seeking past the end of stream
2201 // of either audio or video (or both).
2202 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2203 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2204
2205 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2206 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2207
2208 // Seeking past the end of video.
2209 // Note: audio data is available for that seek point.
2210 bool seek_cb_was_called = false;
2211 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2212 demuxer_->StartWaitingForSeek(seek_time);
2213 demuxer_->Seek(seek_time,
2214 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2215 message_loop_.RunUntilIdle();
2216
2217 EXPECT_FALSE(seek_cb_was_called);
2218
2219 EXPECT_CALL(host_, SetDuration(
2220 base::TimeDelta::FromMilliseconds(120)));
2221 MarkEndOfStream(PIPELINE_OK);
2222 message_loop_.RunUntilIdle();
2223
2224 EXPECT_TRUE(seek_cb_was_called);
2225
2226 ShutdownDemuxer();
2227 }
2228
2229 // Test that EndOfStream is ignored if it arrives during a pending seek
2230 // whose seek time is before some existing ranges.
2231 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2232 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2233
2234 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2235 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2236 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2237 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2238
2239 bool seek_cb_was_called = false;
2240 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2241 demuxer_->StartWaitingForSeek(seek_time);
2242 demuxer_->Seek(seek_time,
2243 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2244 message_loop_.RunUntilIdle();
2245
2246 EXPECT_FALSE(seek_cb_was_called);
2247
2248 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2249 MarkEndOfStream(PIPELINE_OK);
2250 message_loop_.RunUntilIdle();
2251
2252 EXPECT_FALSE(seek_cb_was_called);
2253
2254 demuxer_->UnmarkEndOfStream();
2255
2256 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2257 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2258
2259 message_loop_.RunUntilIdle();
2260
2261 EXPECT_TRUE(seek_cb_was_called);
2262
2263 ShutdownDemuxer();
2264 }
2265
2266 // Test ranges in an audio-only stream.
2267 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2268 EXPECT_CALL(*this, DemuxerOpened());
2269 demuxer_->Initialize(
2270 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2271
2272 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2273 AppendInitSegment(HAS_AUDIO);
2274
2275 // Test a simple cluster.
2276 AppendCluster(
2277 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2278
2279 CheckExpectedRanges("{ [0,92) }");
2280
2281 // Append a disjoint cluster to check for two separate ranges.
2282 AppendCluster(GenerateSingleStreamCluster(
2283 150, 219, kAudioTrackNum, kAudioBlockDuration));
2284
2285 CheckExpectedRanges("{ [0,92) [150,219) }");
2286 }
2287
2288 // Test ranges in a video-only stream.
2289 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2290 EXPECT_CALL(*this, DemuxerOpened());
2291 demuxer_->Initialize(
2292 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2293
2294 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2295 AppendInitSegment(HAS_VIDEO);
2296
2297 // Test a simple cluster.
2298 AppendCluster(
2299 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2300
2301 CheckExpectedRanges("{ [0,132) }");
2302
2303 // Append a disjoint cluster to check for two separate ranges.
2304 AppendCluster(GenerateSingleStreamCluster(
2305 200, 299, kVideoTrackNum, kVideoBlockDuration));
2306
2307 CheckExpectedRanges("{ [0,132) [200,299) }");
2308 }
2309
2310 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2311 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2312
2313 // Audio: 0 -> 23
2314 // Video: 0 -> 33
2315 // Buffered Range: 0 -> 23
2316 // Audio block duration is smaller than video block duration,
2317 // so the buffered ranges should correspond to the audio blocks.
2318 AppendCluster(GenerateSingleStreamCluster(
2319 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2320 AppendCluster(GenerateSingleStreamCluster(
2321 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2322
2323 CheckExpectedRanges("{ [0,23) }");
2324
2325 // Audio: 300 -> 400
2326 // Video: 320 -> 420
2327 // Buffered Range: 320 -> 400 (end overlap)
2328 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2329 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2330
2331 CheckExpectedRanges("{ [0,23) [320,400) }");
2332
2333 // Audio: 520 -> 590
2334 // Video: 500 -> 570
2335 // Buffered Range: 520 -> 570 (front overlap)
2336 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2337 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2338
2339 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2340
2341 // Audio: 720 -> 750
2342 // Video: 700 -> 770
2343 // Buffered Range: 720 -> 750 (complete overlap, audio)
2344 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2345 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2346
2347 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2348
2349 // Audio: 900 -> 970
2350 // Video: 920 -> 950
2351 // Buffered Range: 920 -> 950 (complete overlap, video)
2352 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2353 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2354
2355 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2356
2357 // Appending within buffered range should not affect buffered ranges.
2358 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2359 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2360
2361 // Appending to single stream outside buffered ranges should not affect
2362 // buffered ranges.
2363 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2364 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2365 }
2366
2367 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2368 EXPECT_CALL(host_, AddTextStream(_, _));
2369 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2370
2371 // Append audio & video data
2372 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
2373 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2374
2375 // Verify that a text track with no cues does not result in an empty buffered
2376 // range.
2377 CheckExpectedRanges("{ [0,46) }");
2378
2379 // Add some text cues.
2380 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
2381
2382 // Verify that the new cues did not affect the buffered ranges.
2383 CheckExpectedRanges("{ [0,46) }");
2384
2385 // Remove the buffered range.
2386 demuxer_->Remove(kSourceId, base::TimeDelta(),
2387 base::TimeDelta::FromMilliseconds(46));
2388 CheckExpectedRanges("{ }");
2389 }
2390
2391 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2392 // over-hanging tails at the end of the ranges as this is likely due to block
2393 // duration differences.
2394 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2395 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2396
2397 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
2398 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2399
2400 CheckExpectedRanges("{ [0,46) }");
2401
2402 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2403 MarkEndOfStream(PIPELINE_OK);
2404
2405 // Verify that the range extends to the end of the video data.
2406 CheckExpectedRanges("{ [0,66) }");
2407
2408 // Verify that the range reverts to the intersection when end of stream
2409 // has been cancelled.
2410 demuxer_->UnmarkEndOfStream();
2411 CheckExpectedRanges("{ [0,46) }");
2412
2413 // Append and remove data so that the 2 streams' end ranges do not overlap.
2414
2415 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
2416 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2417 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2418 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2419 "200K 233 266 299 332K 365");
2420
2421 // At this point, the per-stream ranges are as follows:
2422 // Audio: [0,46) [200,246)
2423 // Video: [0,66) [200,398)
2424 CheckExpectedRanges("{ [0,46) [200,246) }");
2425
2426 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2427 base::TimeDelta::FromMilliseconds(300));
2428
2429 // At this point, the per-stream ranges are as follows:
2430 // Audio: [0,46)
2431 // Video: [0,66) [332,398)
2432 CheckExpectedRanges("{ [0,46) }");
2433
2434 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2435 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
2436
2437 // At this point, the per-stream ranges are as follows:
2438 // Audio: [0,46) [200,246)
2439 // Video: [0,66) [200,266) [332,398)
2440 // NOTE: The last ranges on each stream do not overlap in time.
2441 CheckExpectedRanges("{ [0,46) [200,246) }");
2442
2443 MarkEndOfStream(PIPELINE_OK);
2444
2445 // NOTE: The last range on each stream gets extended to the highest
2446 // end timestamp according to the spec. The last audio range gets extended
2447 // from [200,246) to [200,398) which is why the intersection results in the
2448 // middle range getting larger AND the new range appearing.
2449 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2450 }
2451
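// Verify reads and seeks when the audio and video blocks within a cluster
// start at different timecodes.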
2452 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2453 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2454
2455 // Create a cluster where the video timecode begins 25ms after the audio.
2456 AppendCluster(GenerateCluster(0, 25, 8));
2457
2458 Seek(base::TimeDelta::FromSeconds(0));
2459 GenerateExpectedReads(0, 25, 8);
2460
2461 // Seek to 5 seconds.
2462 Seek(base::TimeDelta::FromSeconds(5));
2463
2464 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2465 // after the video.
2466 AppendCluster(GenerateCluster(5025, 5000, 8));
2467 GenerateExpectedReads(5025, 5000, 8);
2468 }
2469
2470 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2471 std::string audio_id = "audio1";
2472 std::string video_id = "video1";
2473 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2474
2475 // Generate two streams where the video stream starts 5ms after the audio
2476 // stream and append them.
2477 AppendCluster(audio_id, GenerateSingleStreamCluster(
2478 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2479 AppendCluster(video_id, GenerateSingleStreamCluster(
2480 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2481
2482 // Both streams should be able to fulfill a seek to 25.
2483 Seek(base::TimeDelta::FromMilliseconds(25));
2484 GenerateAudioStreamExpectedReads(25, 4);
2485 GenerateVideoStreamExpectedReads(30, 4);
2486 }
2487
2488 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2489 std::string audio_id = "audio1";
2490 std::string video_id = "video1";
2491 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2492
2493 // Generate two streams where the video stream starts 10s after the audio
2494 // stream and append them.
2495 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2496 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2497 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2498 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2499
2500 // Should not be able to fulfill a seek to 0.
2501 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2502 demuxer_->StartWaitingForSeek(seek_time);
2503 demuxer_->Seek(seek_time,
2504 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2505 ExpectRead(DemuxerStream::AUDIO, 0);
2506 ExpectEndOfStream(DemuxerStream::VIDEO);
2507 }
2508
2509 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2510 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2511
2512 // Generate and append an empty cluster beginning at 0.
2513 AppendEmptyCluster(0);
2514
2515 // Sanity check that data can be appended after this cluster correctly.
2516 AppendCluster(GenerateCluster(0, 2));
2517 ExpectRead(DemuxerStream::AUDIO, 0);
2518 ExpectRead(DemuxerStream::VIDEO, 0);
2519 }
2520
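// Verify that codec strings carrying profile/level suffixes (e.g.
// "avc1.4D4041") are matched by prefix and only accepted when proprietary
// codecs are enabled.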
2521 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2522 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2523
2524 #if defined(USE_PROPRIETARY_CODECS)
2525 expected = ChunkDemuxer::kOk;
2526 #endif
2527
2528 std::vector<std::string> codecs;
2529 codecs.push_back("avc1.4D4041");
2530
2531 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2532 }
2533
2534 // Test codec IDs that are not compliant with RFC6381, but have been
2535 // seen in the wild.
2536 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2537 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2538
2539 #if defined(USE_PROPRIETARY_CODECS)
2540 expected = ChunkDemuxer::kOk;
2541 #endif
2542 const char* codec_ids[] = {
2543 // GPAC places leading zeros on the audio object type.
2544 "mp4a.40.02",
2545 "mp4a.40.05"
2546 };
2547
2548 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2549 std::vector<std::string> codecs;
2550 codecs.push_back(codec_ids[i]);
2551
2552 ChunkDemuxer::Status result =
2553 demuxer_->AddId("source_id", "audio/mp4", codecs);
2554
2555 EXPECT_EQ(result, expected)
2556 << "Fail to add codec_id '" << codec_ids[i] << "'";
2557
2558 if (result == ChunkDemuxer::kOk)
2559 demuxer_->RemoveId("source_id");
2560 }
2561 }
2562
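// Verify that the end of stream state persists across a Seek() back to the
// beginning, so both streams can be read to the end again.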
2563 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2564 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2565
2566 EXPECT_CALL(host_, SetDuration(_))
2567 .Times(AnyNumber());
2568
2569 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2570 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2571
2572 AppendCluster(kDefaultFirstCluster());
2573 AppendCluster(kDefaultSecondCluster());
2574 MarkEndOfStream(PIPELINE_OK);
2575
2576 DemuxerStream::Status status;
2577 base::TimeDelta last_timestamp;
2578
2579 // Verify that we can read audio & video to the end w/o problems.
2580 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2581 EXPECT_EQ(DemuxerStream::kOk, status);
2582 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2583
2584 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2585 EXPECT_EQ(DemuxerStream::kOk, status);
2586 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2587
2588 // Seek back to 0 and verify that we can read to the end again.
2589 Seek(base::TimeDelta::FromMilliseconds(0));
2590
2591 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2592 EXPECT_EQ(DemuxerStream::kOk, status);
2593 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2594
2595 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2596 EXPECT_EQ(DemuxerStream::kOk, status);
2597 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2598 }
2599
2600 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2601 EXPECT_CALL(*this, DemuxerOpened());
2602 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2603 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2604 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2605
2606 CheckExpectedRanges("audio", "{ }");
2607 CheckExpectedRanges("video", "{ }");
2608 }
2609
2610 // Test that Seek() completes successfully when the first cluster
2611 // arrives.
2612 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2613 InSequence s;
2614
2615 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2616
2617 AppendCluster(kDefaultFirstCluster());
2618
2619 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2620 demuxer_->StartWaitingForSeek(seek_time);
2621
2622 AppendCluster(kDefaultSecondCluster());
2623 EXPECT_CALL(host_, SetDuration(
2624 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2625 MarkEndOfStream(PIPELINE_OK);
2626
2627 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2628
2629 GenerateExpectedReads(0, 4);
2630 GenerateExpectedReads(46, 66, 5);
2631
2632 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2633 end_of_stream_helper.RequestReads();
2634 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2635 }
2636
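// Verify that video config changes are reported via kConfigChanged and that
// the stream exposes the new decoder config after each change.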
2637 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2638 InSequence s;
2639
2640 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2641
2642 DemuxerStream::Status status;
2643 base::TimeDelta last_timestamp;
2644
2645 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2646
2647 // Fetch initial video config and verify it matches what we expect.
2648 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2649 ASSERT_TRUE(video_config_1.IsValidConfig());
2650 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2651 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2652
2653 ExpectRead(DemuxerStream::VIDEO, 0);
2654
2655 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2656
2657 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2658 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2659
2660 // Fetch the new decoder config.
2661 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2662 ASSERT_TRUE(video_config_2.IsValidConfig());
2663 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2664 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2665
2666 ExpectRead(DemuxerStream::VIDEO, 527);
2667
2668 // Read until the next config change.
2669 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2670 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2671 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2672
2673 // Get the new config and verify that it matches the first one.
2674 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2675
2676 ExpectRead(DemuxerStream::VIDEO, 801);
2677
2678 // Read until the end of the stream just to make sure there aren't any other
2679 // config changes.
2680 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2681 ASSERT_EQ(status, DemuxerStream::kOk);
2682 }
2683
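// Verify that audio config changes are reported via kConfigChanged and that
// the stream exposes the new decoder config after each change.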
2684 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2685 InSequence s;
2686
2687 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2688
2689 DemuxerStream::Status status;
2690 base::TimeDelta last_timestamp;
2691
2692 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2693
2694 // Fetch initial audio config and verify it matches what we expect.
2695 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2696 ASSERT_TRUE(audio_config_1.IsValidConfig());
2697 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2698 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2699
2700 ExpectRead(DemuxerStream::AUDIO, 0);
2701
2702 // The first config change seen is from a splice frame representing an overlap
2703 // of buffers from config 1 by buffers from config 2.
2704 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2705 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2706 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2707
2708 // Fetch the new decoder config.
2709 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2710 ASSERT_TRUE(audio_config_2.IsValidConfig());
2711 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2712 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2713
2714 // The next config change is from a splice frame representing an overlap of
2715 // buffers from config 2 by buffers from config 1.
2716 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2717 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2718 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2719 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2720
2721 // Read until the end of the stream just to make sure there aren't any other
2722 // config changes.
2723 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2724 ASSERT_EQ(status, DemuxerStream::kOk);
2725 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2726 }
2727
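// Verify that seeking across a config boundary signals a config change, and
// that seeking back to a location with the original config (without an
// intervening Read()) does not.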
2728 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2729 InSequence s;
2730
2731 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2732
2733 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2734
2735 // Fetch initial video config and verify it matches what we expect.
2736 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2737 ASSERT_TRUE(video_config_1.IsValidConfig());
2738 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2739 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2740
2741 ExpectRead(DemuxerStream::VIDEO, 0);
2742
2743 // Seek to a location with a different config.
2744 Seek(base::TimeDelta::FromMilliseconds(527));
2745
2746 // Verify that the config change is signalled.
2747 ExpectConfigChanged(DemuxerStream::VIDEO);
2748
2749 // Fetch the new decoder config and verify it is what we expect.
2750 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2751 ASSERT_TRUE(video_config_2.IsValidConfig());
2752 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2753 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2754
2755 // Verify that Read() will return a buffer now.
2756 ExpectRead(DemuxerStream::VIDEO, 527);
2757
2758 // Seek back to the beginning and verify we get another config change.
2759 Seek(base::TimeDelta::FromMilliseconds(0));
2760 ExpectConfigChanged(DemuxerStream::VIDEO);
2761 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2762 ExpectRead(DemuxerStream::VIDEO, 0);
2763
2764 // Seek to a location that requires a config change and then
2765 // seek to a new location that has the same configuration as
2766 // the start of the file without a Read() in the middle.
2767 Seek(base::TimeDelta::FromMilliseconds(527));
2768 Seek(base::TimeDelta::FromMilliseconds(801));
2769
2770 // Verify that no config change is signalled.
2771 ExpectRead(DemuxerStream::VIDEO, 801);
2772 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2773 }
2774
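// Verify that a positive timestamp offset shifts appended buffers forward in
// presentation time.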
2775 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2776 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2777
2778 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2779 AppendCluster(GenerateCluster(0, 2));
2780
2781 Seek(base::TimeDelta::FromMilliseconds(30000));
2782
2783 GenerateExpectedReads(30000, 2);
2784 }
2785
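// Verify that a negative timestamp offset shifts appended buffers back toward
// zero.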
2786 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2787 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2788
2789 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2790 AppendCluster(GenerateCluster(1000, 2));
2791
2792 GenerateExpectedReads(0, 2);
2793 }
2794
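// Verify that timestamp offsets can be set independently for separate audio
// and video source IDs.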
2795 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2796 std::string audio_id = "audio1";
2797 std::string video_id = "video1";
2798 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2799
2800 ASSERT_TRUE(SetTimestampOffset(
2801 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2802 ASSERT_TRUE(SetTimestampOffset(
2803 video_id, base::TimeDelta::FromMilliseconds(-2500)));
2804 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2805 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2806 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2807 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2808 GenerateAudioStreamExpectedReads(0, 4);
2809 GenerateVideoStreamExpectedReads(0, 4);
2810
2811 Seek(base::TimeDelta::FromMilliseconds(27300));
2812
2813 ASSERT_TRUE(SetTimestampOffset(
2814 audio_id, base::TimeDelta::FromMilliseconds(27300)));
2815 ASSERT_TRUE(SetTimestampOffset(
2816 video_id, base::TimeDelta::FromMilliseconds(27300)));
2817 AppendCluster(audio_id, GenerateSingleStreamCluster(
2818 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2819 AppendCluster(video_id, GenerateSingleStreamCluster(
2820 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2821 GenerateVideoStreamExpectedReads(27300, 4);
2822 GenerateAudioStreamExpectedReads(27300, 4);
2823 }
2824
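// Verify that Abort() clears the "parsing media segment" state left by a
// partial cluster append.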
2825 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2826 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2827
2828 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2829 // Append only part of the cluster data.
2830 AppendData(cluster->data(), cluster->size() - 13);
2831
2832 // Confirm we're in the middle of parsing a media segment.
2833 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2834
2835 demuxer_->Abort(kSourceId,
2836 append_window_start_for_next_append_,
2837 append_window_end_for_next_append_,
2838 &timestamp_offset_map_[kSourceId]);
2839
2840 // After Abort(), parsing should no longer be in the middle of a media
2841 // segment.
2842 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
2843 }
2844
2845 #if defined(USE_PROPRIETARY_CODECS)
2846 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
2847 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
2848 EXPECT_CALL(*this, DemuxerOpened());
2849 demuxer_->Initialize(
2850 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
2851 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
2852
2853 // For info:
2854 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
2855 // Video: first PES:
2856 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
2857 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
2858 // Audio: first PES:
2859 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
2860 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
2861 // Video: last PES:
2862 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
2863 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
2864 // Audio: last PES:
2865 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
2866
2867 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
2868 AppendData(kSourceId, buffer->data(), buffer->data_size());
2869
2870 // Confirm we're in the middle of parsing a media segment.
2871 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2872
2873 // Abort on the Mpeg2 TS parser triggers the emission of the last video
2874 // buffer which is pending in the stream parser.
2875 Ranges<base::TimeDelta> range_before_abort =
2876 demuxer_->GetBufferedRanges(kSourceId);
2877 demuxer_->Abort(kSourceId,
2878 append_window_start_for_next_append_,
2879 append_window_end_for_next_append_,
2880 &timestamp_offset_map_[kSourceId]);
2881 Ranges<base::TimeDelta> range_after_abort =
2882 demuxer_->GetBufferedRanges(kSourceId);
2883
2884 ASSERT_EQ(range_before_abort.size(), 1u);
2885 ASSERT_EQ(range_after_abort.size(), 1u);
2886 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
2887 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
2888 }
2889 #endif
2890 #endif
2891
2892 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
2893 const uint8 kBuffer[] = {
2894 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
2895 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
2896
2897 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
2898 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
2899 /* e.g. put some blocks here... */
2900 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
2901 };
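// In EBML, the byte following an element ID begins a variable-length size
// field; a set top bit means a 1-byte field whose low 7 bits hold the size,
// and the all-ones value 0xFF is the reserved "unknown size" marker. For the
// 1-byte fields above, a sketch of that decoding:
//   int size = size_byte & 0x7F;              // 0x83 -> 3, 0x8A -> 10
//   bool size_unknown = (size_byte == 0xFF);  // size not known in advance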
2902
2903 // This array indicates the expected return value of IsParsingMediaSegment()
2904 // following each incrementally appended byte in |kBuffer|.
2905 const bool kExpectedReturnValues[] = {
2906 false, false, false, false, true,
2907 true, true, false,
2908
2909 false, false, false, false, true,
2910 true, true, true,
2911
2912 true, true, true, true, false,
2913 };
2914
2915 COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
2916 test_arrays_out_of_sync);
2917 COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
2918
2919 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2920
2921 for (size_t i = 0; i < sizeof(kBuffer); i++) {
2922 DVLOG(3) << "Appending and testing index " << i;
2923 AppendData(kBuffer + i, 1);
2924 bool expected_return_value = kExpectedReturnValues[i];
2925 EXPECT_EQ(expected_return_value,
2926 demuxer_->IsParsingMediaSegment(kSourceId));
2927 }
2928 }
2929
2930 TEST_F(ChunkDemuxerTest, DurationChange) {
2931 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2932 const int kStreamDuration = kDefaultDuration().InMilliseconds();
2933
2934 // Add data leading up to the currently set duration.
2935 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
2936 kStreamDuration - kVideoBlockDuration,
2937 2));
2938
2939 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2940
2941 // Add data beginning at the currently set duration and expect a new duration
2942 // to be signaled. Note that the last video block will have a higher end
2943 // timestamp than the last audio block.
2944 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
2945 EXPECT_CALL(host_, SetDuration(
2946 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
2947 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
2948
2949 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
2950
2951 // Add more data to the end of each media type. Note that the last audio block
2952 // will have a higher end timestamp than the last video block.
2953 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
2954 EXPECT_CALL(host_, SetDuration(
2955 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
2956 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
2957 kStreamDuration + kVideoBlockDuration,
2958 3));
2959
2960 // See that the range has increased appropriately (but not to the full
2961 // duration of 201293, since there is not enough video appended for that).
2962 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
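// For reference: kFinalStreamDuration is 201224 + 3 * 23 = 201293 ms, while
// the buffered range above stops 3 ms short of it because the appended video
// data ends first.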
2963 }
2964
2965 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
2966 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2967 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
2968 EXPECT_CALL(host_, SetDuration(
2969 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2970 kVideoBlockDuration * 2)));
2971 AppendCluster(GenerateCluster(0, 4));
2972 }
2973
2974 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
2975 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2976
2977 AppendCluster(kDefaultFirstCluster());
2978
2979 EXPECT_CALL(host_, SetDuration(
2980 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
2981 MarkEndOfStream(PIPELINE_OK);
2982 }
2983
2984
2985 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
2986 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2987 AppendData(NULL, 0);
2988 }
2989
2990 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
2991 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2992
2993 EXPECT_CALL(host_, SetDuration(_))
2994 .Times(AnyNumber());
2995
2996 AppendCluster(kDefaultFirstCluster());
2997 MarkEndOfStream(PIPELINE_OK);
2998
2999 demuxer_->UnmarkEndOfStream();
3000
3001 AppendCluster(kDefaultSecondCluster());
3002 MarkEndOfStream(PIPELINE_OK);
3003 }
3004
3005 // Test receiving a Shutdown() call before we get an Initialize()
3006 // call. This can happen if the video element gets destroyed before
3007 // the pipeline has a chance to initialize the demuxer.
3008 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3009 demuxer_->Shutdown();
3010 demuxer_->Initialize(
3011 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3012 message_loop_.RunUntilIdle();
3013 }
3014
3015 // Verifies that signaling end of stream while stalled at a gap
3016 // boundary does not trigger end of stream buffers to be returned.
3017 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3018 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3019
3020 AppendCluster(0, 10);
3021 AppendCluster(300, 10);
3022 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3023
3024 GenerateExpectedReads(0, 10);
3025
3026 bool audio_read_done = false;
3027 bool video_read_done = false;
3028 ReadAudio(base::Bind(&OnReadDone,
3029 base::TimeDelta::FromMilliseconds(138),
3030 &audio_read_done));
3031 ReadVideo(base::Bind(&OnReadDone,
3032 base::TimeDelta::FromMilliseconds(138),
3033 &video_read_done));
3034
3035 // Verify that the reads didn't complete.
3036 EXPECT_FALSE(audio_read_done);
3037 EXPECT_FALSE(video_read_done);
3038
3039 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3040 MarkEndOfStream(PIPELINE_OK);
3041
3042 // Verify that the reads still haven't completed.
3043 EXPECT_FALSE(audio_read_done);
3044 EXPECT_FALSE(video_read_done);
3045
3046 demuxer_->UnmarkEndOfStream();
3047
3048 AppendCluster(138, 22);
3049
3050 message_loop_.RunUntilIdle();
3051
3052 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3053
3054 // Verify that the reads have completed.
3055 EXPECT_TRUE(audio_read_done);
3056 EXPECT_TRUE(video_read_done);
3057
3058 // Read the rest of the buffers.
3059 GenerateExpectedReads(161, 171, 20);
3060
3061 // Verify that reads block because the append cleared the end of stream state.
3062 audio_read_done = false;
3063 video_read_done = false;
3064 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3065 &audio_read_done));
3066 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3067 &video_read_done));
3068
3069 // Verify that the reads don't complete.
3070 EXPECT_FALSE(audio_read_done);
3071 EXPECT_FALSE(video_read_done);
3072
3073 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3074 MarkEndOfStream(PIPELINE_OK);
3075
3076 EXPECT_TRUE(audio_read_done);
3077 EXPECT_TRUE(video_read_done);
3078 }
3079
3080 TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3081 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3082
3083 // Cancel preroll.
3084 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3085 demuxer_->CancelPendingSeek(seek_time);
3086
3087 // Initiate the seek to the new location.
3088 Seek(seek_time);
3089
3090 // Append data to satisfy the seek.
3091 AppendCluster(seek_time.InMilliseconds(), 10);
3092 }
3093
3094 TEST_F(ChunkDemuxerTest, GCDuringSeek) {
3095 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3096
3097 demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
3098
3099 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3100 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3101
3102 // Initiate a seek to |seek_time1|.
3103 Seek(seek_time1);
3104
3105 // Append data to satisfy the first seek request.
3106 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3107 seek_time1.InMilliseconds(), 5);
3108 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
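// (Five audio blocks of kAudioBlockDuration = 23 ms each: 1000 + 5 * 23 = 1115.)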
3109
3110 // Signal that the second seek is starting.
3111 demuxer_->StartWaitingForSeek(seek_time2);
3112
3113 // Append data to satisfy the second seek. This append triggers
3114 // the garbage collection logic since we set the memory limit to
3115 // 5 blocks.
3116 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3117 seek_time2.InMilliseconds(), 5);
3118
3119 // Verify that the buffers that cover |seek_time2| do not get
3120 // garbage collected.
3121 CheckExpectedRanges(kSourceId, "{ [500,615) }");
3122
3123 // Complete the seek.
3124 demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3125
3126
3127 // Append more data and make sure that the blocks for |seek_time2|
3128 // don't get removed.
3129 //
3130 // NOTE: The current GC algorithm tries to preserve the GOP at the
3131 // current position as well as the last appended GOP. This is
3132 // why there are 2 ranges in the expectations.
3133 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
3134 CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
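// With the 5-block limit, that presumably leaves 4 blocks at the current
// position (500 + 4 * 23 = 592) plus the final appended block
// (700 + 4 * 23 = 792, ending at 792 + 23 = 815), which matches the two
// ranges checked above.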
3135 }
3136
3137 TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
3138 ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3139 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3140
3141 // Set the append window to [50,280).
3142 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3143 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3144
3145 // Append a cluster that starts before and ends after the append window.
3146 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3147 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3148
3149 // Verify that GOPs that start outside the window are not included
3150 // in the buffer. Also verify that buffers that start inside the
3151 // window and extend beyond the end of the window are not included.
3152 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3153 CheckExpectedBuffers(stream, "120 150 180 210 240");
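// (The 0K GOP starts before the 50 ms window start, so frames 0-90 are
// dropped, and the 270 frame would end at 300 ms, past the 280 ms window end,
// so the kept range stops at 270.)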
3154
3155 // Extend the append window to [50,650).
3156 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3157
3158 // Append more data and verify that the added buffers start at the next
3159 // keyframe.
3160 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3161 "360 390 420K 450 480 510 540K 570 600 630K");
3162 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3163 }
3164
3165 TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
3166 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3167 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3168
3169 // Set the append window to [50,280).
3170 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3171 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3172
3173 // Append a cluster that starts before and ends after the append window.
3174 AppendSingleStreamCluster(
3175 kSourceId, kAudioTrackNum,
3176 "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3177
3178 // Verify that frames that end outside the window are not included
3179 // in the buffer. Also verify that buffers that start inside the
3180 // window and extend beyond the end of the window are not included.
3181 //
3182 // The first 50ms of the range should be truncated since it overlaps
3183 // the start of the append window.
3184 CheckExpectedRanges(kSourceId, "{ [50,270) }");
3185
3186 // The "50P" buffer is the "0" buffer marked for complete discard. The next
3187 // "50" buffer is the "30" buffer marked with 20ms of start discard.
3188 CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
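// (The "30" buffer spans [30,60) and straddles the 50 ms window start, hence
// the 50 - 30 = 20 ms of front discard noted above.)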
3189
3190 // Extend the append window to [50,650).
3191 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3192
3193 // Append more data and verify that a new range is created.
3194 AppendSingleStreamCluster(
3195 kSourceId, kAudioTrackNum,
3196 "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3197 CheckExpectedRanges(kSourceId, "{ [50,270) [360,630) }");
3198 }
3199
3200 TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3201 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3202
3203 // Set the append window to [10,20).
3204 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3205 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3206
3207 // Append a cluster that starts before and ends after the append window.
3208 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3209
3210 // Verify that everything is dropped in this case. No partial append should
3211 // be generated.
3212 CheckExpectedRanges(kSourceId, "{ }");
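// (With any block duration longer than 20 ms, the lone "0K" buffer starts
// before the 10 ms window start and ends after the 20 ms window end, so it is
// dropped entirely rather than trimmed at both edges.)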
3213 }
3214
3215 TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3216 EXPECT_CALL(*this, DemuxerOpened());
3217 demuxer_->Initialize(
3218 &host_,
3219 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3220 true);
3221 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3222
3223 // Set the append window to [50,150).
3224 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3225 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3226
3227 // Read a WebM file into memory and send the data to the demuxer. The chunk
3228 // size has been chosen carefully to ensure the preroll buffer used by the
3229 // partial append window trim must come from a previous Append() call.
3230 scoped_refptr<DecoderBuffer> buffer =
3231 ReadTestDataFile("bear-320x240-audio-only.webm");
3232 AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3233
3234 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3235 CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
3236 }
3237
3238 TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3239 EXPECT_CALL(*this, DemuxerOpened());
3240 demuxer_->Initialize(
3241 &host_,
3242 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3243 true);
3244 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3245
3246 // Set the append window such that the first file is completely before the
3247 // append window.
3248 // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
3249 // have the correct duration in their init segments, and the
3250 // CreateInitDoneCB() call, above, is fixed to used that duration. See
3251 // http://crbug.com/354284.
3252 const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3253 append_window_start_for_next_append_ = duration_1;
3254
3255 // Read a WebM file into memory and append the data.
3256 scoped_refptr<DecoderBuffer> buffer =
3257 ReadTestDataFile("bear-320x240-audio-only.webm");
3258 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3259 CheckExpectedRanges(kSourceId, "{ }");
3260
3261 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3262 AudioDecoderConfig config_1 = stream->audio_decoder_config();
3263
3264 // Read a second WebM with a different config in and append the data.
3265 scoped_refptr<DecoderBuffer> buffer2 =
3266 ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3267 EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3268 ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3269 AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
3270 CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3271
3272 Seek(duration_1);
3273 ExpectConfigChanged(DemuxerStream::AUDIO);
3274 ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3275 CheckExpectedBuffers(stream, "2746 2767 2789 2810");
3276 }
3277
3278 TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
3279 DemuxerStream* text_stream = NULL;
3280 EXPECT_CALL(host_, AddTextStream(_, _))
3281 .WillOnce(SaveArg<0>(&text_stream));
3282 ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3283 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3284
3285 // Set the append window to [20,280).
3286 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3287 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3288
3289 // Append a cluster that starts before and ends after the append
3290 // window.
3291 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3292 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3293 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
3294
3295 // Verify that text cues that start outside the window are not included
3296 // in the buffer. Also verify that cues that extend beyond the
3297 // window are not included.
3298 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3299 CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3300 CheckExpectedBuffers(text_stream, "100");
3301
3302 // Extend the append window to [20,650).
3303 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3304
3305 // Append more data and verify that a new range is created.
3306 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3307 "360 390 420K 450 480 510 540K 570 600 630K");
3308 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
3309 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3310
3311 // Seek to the new range and verify that the expected buffers are returned.
3312 Seek(base::TimeDelta::FromMilliseconds(420));
3313 CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3314 CheckExpectedBuffers(text_stream, "400 500");
3315 }
3316
3317 TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3318 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3319 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3320 AppendGarbage();
3321 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3322 demuxer_->StartWaitingForSeek(seek_time);
3323 }
3324
3325 TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
3326 DemuxerStream* text_stream = NULL;
3327 EXPECT_CALL(host_, AddTextStream(_, _))
3328 .WillOnce(SaveArg<0>(&text_stream));
3329 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3330
3331 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3332 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3333
3334 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3335 "0K 20K 40K 60K 80K 100K 120K 140K");
3336 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3337 "0K 30 60 90 120K 150 180");
3338 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
3339
3340 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3341 CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3342 CheckExpectedBuffers(text_stream, "0 100 200");
3343
3344 // Remove the buffers that were added.
3345 demuxer_->Remove(kSourceId, base::TimeDelta(),
3346 base::TimeDelta::FromMilliseconds(300));
3347
3348 // Verify that all the appended data has been removed.
3349 CheckExpectedRanges(kSourceId, "{ }");
3350
3351 // Append new buffers that are clearly different than the original
3352 // ones and verify that only the new buffers are returned.
3353 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3354 "1K 21K 41K 61K 81K 101K 121K 141K");
3355 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3356 "1K 31 61 91 121K 151 181");
3357 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
3358
3359 Seek(base::TimeDelta());
3360 CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3361 CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3362 CheckExpectedBuffers(text_stream, "1 101 201");
3363 }
3364
3365 TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
3366 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3367 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3368
3369 // Set the duration to something small so that the append that
3370 // follows updates the duration to reflect the end of the appended data.
3371 EXPECT_CALL(host_, SetDuration(
3372 base::TimeDelta::FromMilliseconds(1)));
3373 demuxer_->SetDuration(0.001);
3374
3375 EXPECT_CALL(host_, SetDuration(
3376 base::TimeDelta::FromMilliseconds(160)));
3377 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3378 "0K 20K 40K 60K 80K 100K 120K 140K");
3379
3380 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3381 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3382
3383 demuxer_->Remove(kSourceId,
3384 base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3385 kInfiniteDuration());
3386
3387 Seek(base::TimeDelta());
3388 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3389 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3390 }
3391
3392 // Verifies that a Seek() will complete without text cues for
3393 // the seek point and will return cues after the seek position
3394 // when they are eventually appended.
3395 TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3396 DemuxerStream* text_stream = NULL;
3397 EXPECT_CALL(host_, AddTextStream(_, _))
3398 .WillOnce(SaveArg<0>(&text_stream));
3399 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3400
3401 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3402 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3403
3404 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3405 bool seek_cb_was_called = false;
3406 demuxer_->StartWaitingForSeek(seek_time);
3407 demuxer_->Seek(seek_time,
3408 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3409 message_loop_.RunUntilIdle();
3410
3411 EXPECT_FALSE(seek_cb_was_called);
3412
3413 bool text_read_done = false;
3414 text_stream->Read(base::Bind(&OnReadDone,
3415 base::TimeDelta::FromMilliseconds(125),
3416 &text_read_done));
3417
3418 // Append audio & video data so the seek completes.
3419 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3420 "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
3421 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3422 "0K 30 60 90 120K 150 180 210");
3423
3424 message_loop_.RunUntilIdle();
3425 EXPECT_TRUE(seek_cb_was_called);
3426 EXPECT_FALSE(text_read_done);
3427
3428 // Read some audio & video buffers to further verify seek completion.
3429 CheckExpectedBuffers(audio_stream, "120 140");
3430 CheckExpectedBuffers(video_stream, "120 150");
3431
3432 EXPECT_FALSE(text_read_done);
3433
3434 // Append text cues that start after the seek point and verify that
3435 // they are returned by Read() calls.
3436 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
3437
3438 message_loop_.RunUntilIdle();
3439 EXPECT_TRUE(text_read_done);
3440
3441 // NOTE: we start at 175 here because the buffer at 125 was returned
3442 // to the pending read initiated above.
3443 CheckExpectedBuffers(text_stream, "175 225");
3444
3445 // Verify that audio & video streams continue to return expected values.
3446 CheckExpectedBuffers(audio_stream, "160 180");
3447 CheckExpectedBuffers(video_stream, "180 210");
3448 }
3449
3450 TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
3451 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3452
3453 AppendCluster(GenerateCluster(0, 0, 4, true));
3454 CheckExpectedRanges(kSourceId, "{ [0,46) }");
3455
3456 // A new cluster indicates end of the previous cluster with unknown size.
3457 AppendCluster(GenerateCluster(46, 66, 5, true));
3458 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3459 }
3460
3461 TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
3462 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3463
3464 // Add two clusters separated by Cues in a single Append() call.
3465 scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
3466 std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
3467 data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
3468 cluster = GenerateCluster(46, 66, 5, true);
3469 data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
3470 AppendData(&*data.begin(), data.size());
3471
3472 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3473 }
3474
3475 TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
3476 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3477
3478 AppendCluster(GenerateCluster(0, 0, 4));
3479 AppendData(kCuesHeader, sizeof(kCuesHeader));
3480 AppendCluster(GenerateCluster(46, 66, 5));
3481 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3482 }
3483
3484 } // namespace media
3485