/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG2TSExtractor"

#include <inttypes.h>
#include <utils/Log.h>

#include <android-base/macros.h>

#include "MPEG2TSExtractor.h"

#include <media/IStreamSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/MediaKeys.h>
#include <media/stagefright/DataSourceBase.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <mpeg2ts/AnotherPacketSource.h>
#include <utils/String8.h>

#include <hidl/HybridInterface.h>
#include <android/hardware/cas/1.0/ICas.h>

namespace android {

using hardware::cas::V1_0::ICas;

// MPEG-2 transport stream packets are 188 bytes long.
static const size_t kTSPacketSize = 188;
// Number of bytes read near the end of the file per attempt when estimating
// duration, and the maximum number of (window-doubling) retries.
static const int kMaxDurationReadSize = 250000LL;
static const int kMaxDurationRetry = 6;

struct MPEG2TSSource : public MediaTrackHelper {
    MPEG2TSSource(
            MPEG2TSExtractor *extractor,
            const sp<AnotherPacketSource> &impl,
            bool doesSeek);
    virtual ~MPEG2TSSource();

    virtual media_status_t start();
    virtual media_status_t stop();
    virtual media_status_t getFormat(AMediaFormat *);

    virtual media_status_t read(
            MediaBufferHelper **buffer, const ReadOptions *options = NULL);

private:
    MPEG2TSExtractor *mExtractor;
    sp<AnotherPacketSource> mImpl;

    // If there are both audio and video streams, only the video stream
    // will signal seek on the extractor; otherwise the single stream will seek.
    bool mDoesSeek;

    DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSSource);
};

MPEG2TSSource::MPEG2TSSource(
        MPEG2TSExtractor *extractor,
        const sp<AnotherPacketSource> &impl,
        bool doesSeek)
    : mExtractor(extractor),
      mImpl(impl),
      mDoesSeek(doesSeek) {
}

MPEG2TSSource::~MPEG2TSSource() {
}

media_status_t MPEG2TSSource::start() {
    // initialize with one small buffer, but allow growth
    mBufferGroup->init(1 /* one buffer */, 256 /* buffer size */, 64 /* max number of buffers */);

    if (!mImpl->start(NULL)) { // AnotherPacketSource::start() doesn't use its argument
        return AMEDIA_OK;
    }
    return AMEDIA_ERROR_UNKNOWN;
}

media_status_t MPEG2TSSource::stop() {
    if (!mImpl->stop()) {
        return AMEDIA_OK;
    }
    return AMEDIA_ERROR_UNKNOWN;
}

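// Copy every entry of an AMessage into the given AMediaFormat, translating
// each supported item type (int32/int64/float/double/string/buffer).
// Entries of any other type are logged and skipped.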
void copyAMessageToAMediaFormat(AMediaFormat *format, sp<AMessage> msg) {
    size_t numEntries = msg->countEntries();
    for (size_t i = 0; i < numEntries; i++) {
        AMessage::Type type;
        const char *name = msg->getEntryNameAt(i, &type);
        AMessage::ItemData id = msg->getEntryAt(i);

        switch (type) {
            case AMessage::kTypeInt32:
                int32_t val32;
                if (id.find(&val32)) {
                    AMediaFormat_setInt32(format, name, val32);
                }
                break;
            case AMessage::kTypeInt64:
                int64_t val64;
                if (id.find(&val64)) {
                    AMediaFormat_setInt64(format, name, val64);
                }
                break;
            case AMessage::kTypeFloat:
                float valfloat;
                if (id.find(&valfloat)) {
                    AMediaFormat_setFloat(format, name, valfloat);
                }
                break;
            case AMessage::kTypeDouble:
                double valdouble;
                if (id.find(&valdouble)) {
                    AMediaFormat_setDouble(format, name, valdouble);
                }
                break;
            case AMessage::kTypeString:
                if (AString s; id.find(&s)) {
                    AMediaFormat_setString(format, name, s.c_str());
                }
                break;
            case AMessage::kTypeBuffer:
            {
                sp<ABuffer> buffer;
                if (id.find(&buffer)) {
                    AMediaFormat_setBuffer(format, name, buffer->data(), buffer->size());
                }
                break;
            }
            default:
                ALOGW("ignoring unsupported type %d '%s'", type, name);
        }
    }
}

media_status_t MPEG2TSSource::getFormat(AMediaFormat *meta) {
    sp<MetaData> implMeta = mImpl->getFormat();
    sp<AMessage> msg;
    convertMetaDataToMessage(implMeta, &msg);
    copyAMessageToAMediaFormat(meta, msg);
    return AMEDIA_OK;
}

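// Read one access unit from the underlying AnotherPacketSource. If this is
// the seeking track and a seek was requested, the extractor is repositioned
// first. The parser is then fed until a buffer is available, and the unit's
// data plus its timing/crypto metadata keys are copied into a MediaBufferHelper
// owned by this track's buffer group.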
media_status_t MPEG2TSSource::read(
        MediaBufferHelper **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode seekMode;
    if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
        // seek is needed
        status_t err = mExtractor->seek(seekTimeUs, (ReadOptions::SeekMode)seekMode);
        if (err == ERROR_END_OF_STREAM) {
            return AMEDIA_ERROR_END_OF_STREAM;
        } else if (err != OK) {
            return AMEDIA_ERROR_UNKNOWN;
        }
    }

    if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
        return AMEDIA_ERROR_END_OF_STREAM;
    }

    MediaBufferBase *mbuf = nullptr;
    status_t err_read = mImpl->read(&mbuf, (MediaTrack::ReadOptions*) options);
    if (mbuf == nullptr) {
        ALOGE("Track::read: null buffer read from source");
        return AMEDIA_ERROR_UNKNOWN;
    }
    if (err_read != OK) {
        ALOGE("Track::read: no buffer read from source");
        mbuf->release();
        return AMEDIA_ERROR_UNKNOWN;
    }

    size_t length = mbuf->range_length();
    MediaBufferHelper *outbuf = nullptr;
    status_t err = mBufferGroup->acquire_buffer(&outbuf, false, length);
    if (err != OK || outbuf == nullptr) {
        ALOGE("read: no buffer");
        mbuf->release();
        return AMEDIA_ERROR_UNKNOWN;
    }
    memcpy(outbuf->data(), mbuf->data(), length);
    outbuf->set_range(0, length);
    *out = outbuf;
    MetaDataBase &inMeta = mbuf->meta_data();
    AMediaFormat *outMeta = outbuf->meta_data();
    AMediaFormat_clear(outMeta);
    int64_t val64;
    if (inMeta.findInt64(kKeyTime, &val64)) {
        AMediaFormat_setInt64(outMeta, AMEDIAFORMAT_KEY_TIME_US, val64);
    }
    int32_t val32;
    if (inMeta.findInt32(kKeyIsSyncFrame, &val32)) {
        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, val32);
    }
    if (inMeta.findInt32(kKeyCryptoMode, &val32)) {
        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, val32);
    }
    uint32_t bufType;
    const void *bufData;
    size_t bufSize;
    if (inMeta.findData(kKeyCryptoIV, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_IV, bufData, bufSize);
    }
    if (inMeta.findData(kKeyCryptoKey, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, bufData, bufSize);
    }
    if (inMeta.findData(kKeyPlainSizes, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES, bufData, bufSize);
    }
    if (inMeta.findData(kKeyEncryptedSizes, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES, bufData, bufSize);
    }
    if (inMeta.findData(kKeySEI, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_SEI, bufData, bufSize);
    }
    if (inMeta.findData(kKeyAudioPresentationInfo, &bufType, &bufData, &bufSize)) {
        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO, bufData, bufSize);
    }
    mbuf->release();
    return AMEDIA_OK;
}

////////////////////////////////////////////////////////////////////////////////

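// Probe the first byte of the stream: a 0x47 sync byte at offset 0 indicates
// plain 188-byte TS packets; otherwise assume 192-byte M2TS packets, which
// carry a 4-byte extra header (typically an arrival-timestamp prefix) before
// each 188-byte packet. That prefix is then skipped on every read.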
MPEG2TSExtractor::MPEG2TSExtractor(DataSourceHelper *source)
    : mDataSource(source),
      mParser(new ATSParser),
      mLastSyncEvent(0),
      mOffset(0) {
    char header;
    if (source->readAt(0, &header, 1) == 1 && header == 0x47) {
        mHeaderSkip = 0;
    } else {
        mHeaderSkip = 4;
    }
    init();
}

MPEG2TSExtractor::~MPEG2TSExtractor() {
    delete mDataSource;
}

size_t MPEG2TSExtractor::countTracks() {
    return mSourceImpls.size();
}

MediaTrackHelper *MPEG2TSExtractor::getTrack(size_t index) {
    if (index >= mSourceImpls.size()) {
        return NULL;
    }

    // The seek reference track (video if present; audio otherwise) performs
    // seek requests, while other tracks ignore requests.
    return new MPEG2TSSource(this, mSourceImpls.editItemAt(index),
            (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
}

media_status_t MPEG2TSExtractor::getTrackMetaData(
        AMediaFormat *meta,
        size_t index, uint32_t /* flags */) {
    if (meta == nullptr) {
        return AMEDIA_ERROR_INVALID_PARAMETER;
    }
    sp<MetaData> implMeta = index < mSourceImpls.size()
            ? mSourceImpls.editItemAt(index)->getFormat() : NULL;
    if (implMeta == NULL) {
        return AMEDIA_ERROR_UNKNOWN;
    }
    sp<AMessage> msg = new AMessage;
    convertMetaDataToMessage(implMeta, &msg);
    copyAMessageToAMediaFormat(meta, msg);
    return AMEDIA_OK;
}

media_status_t MPEG2TSExtractor::getMetaData(AMediaFormat *meta) {
    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
    return AMEDIA_OK;
}

// static
bool MPEG2TSExtractor::isScrambledFormat(MetaDataBase &format) {
    const char *mime;
    return format.findCString(kKeyMIMEType, &mime)
            && (!strcasecmp(MEDIA_MIMETYPE_VIDEO_SCRAMBLED, mime)
                || !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
}

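// Attach a conditional-access (CAS) session to the parser. The opaque token is
// converted back into an ICas HIDL interface; on success, init() is run again
// so that previously scrambled streams can be parsed and exposed as tracks.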
media_status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
    HalToken halToken;
    halToken.setToExternal((uint8_t*)casToken, size);
    sp<ICas> cas = ICas::castFrom(retrieveHalInterface(halToken));
    ALOGD("setMediaCas: %p", cas.get());

    status_t err = mParser->setMediaCas(cas);
    if (err == OK) {
        ALOGI("All tracks now have descramblers");
        init();
        return AMEDIA_OK;
    }
    return AMEDIA_ERROR_UNKNOWN;
}

status_t MPEG2TSExtractor::findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index) {
    for (size_t i = 0; i < mSourceImpls.size(); i++) {
        if (mSourceImpls[i] == impl) {
            *index = i;
            return OK;
        }
    }
    return NAME_NOT_FOUND;
}

void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
    size_t index;
    if (findIndexOfSource(impl, &index) != OK) {
        mSourceImpls.push(impl);
        mSyncPoints.push();
    }
}

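// Feed packets to the parser until both an audio and a video stream have been
// detected (or roughly two seconds of wall-clock time have passed), then keep
// feeding briefly to estimate the container duration from the collected sync
// points. For scrambled content without a CAS session, parsing stops until
// setMediaCas() succeeds.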
void MPEG2TSExtractor::init() {
    bool haveAudio = false;
    bool haveVideo = false;
    int64_t startTime = ALooper::GetNowUs();
    size_t index;

    status_t err;
    while ((err = feedMore(true /* isInit */)) == OK
            || err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
        if (haveAudio && haveVideo) {
            addSyncPoint_l(mLastSyncEvent);
            mLastSyncEvent.reset();
            break;
        }
        if (!haveVideo) {
            sp<AnotherPacketSource> impl = mParser->getSource(ATSParser::VIDEO);

            if (impl != NULL) {
                sp<MetaData> format = impl->getFormat();
                if (format != NULL) {
                    haveVideo = true;
                    addSource(impl);
                    if (!isScrambledFormat(*(format.get()))) {
                        if (findIndexOfSource(impl, &index) == OK) {
                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
                        }
                    }
                }
            }
        }

        if (!haveAudio) {
            sp<AnotherPacketSource> impl = mParser->getSource(ATSParser::AUDIO);

            if (impl != NULL) {
                sp<MetaData> format = impl->getFormat();
                if (format != NULL) {
                    haveAudio = true;
                    addSource(impl);
                    if (!isScrambledFormat(*(format.get())) && !haveVideo) {
                        if (findIndexOfSource(impl, &index) == OK) {
                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
                        }
                    }
                }
            }
        }

        addSyncPoint_l(mLastSyncEvent);
        mLastSyncEvent.reset();

        // ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED is returned when the mpeg2ts
        // stream is scrambled but we don't have a MediaCas object set. The
        // extraction will only continue when setMediaCas() is called successfully.
        if (err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
            ALOGI("stopped parsing scrambled content, "
                  "haveAudio=%d, haveVideo=%d, elapsedTime=%" PRId64,
                    haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
            return;
        }

        // Wait only for 2 seconds to detect audio/video streams.
        if (ALooper::GetNowUs() - startTime > 2000000LL) {
            break;
        }
    }

    off64_t size;
    if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) {
        size_t prevSyncSize = 1;
        int64_t durationUs = -1;
        List<int64_t> durations;
        // Estimate the duration: keep feeding until the last five estimates
        // agree within 500 ms, or until 2 seconds have elapsed.
        while (feedMore() == OK
                && ALooper::GetNowUs() - startTime <= 2000000LL) {
            if (mSeekSyncPoints->size() > prevSyncSize) {
                prevSyncSize = mSeekSyncPoints->size();
                int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1)
                        - mSeekSyncPoints->keyAt(0);
                off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
                        - mSeekSyncPoints->valueAt(0);
                int64_t currentDurationUs = size * diffUs / diffOffset;
                durations.push_back(currentDurationUs);
                if (durations.size() > 5) {
                    durations.erase(durations.begin());
                    int64_t min = *durations.begin();
                    int64_t max = *durations.begin();
                    for (auto duration : durations) {
                        if (min > duration) {
                            min = duration;
                        }
                        if (max < duration) {
                            max = duration;
                        }
                    }
                    if (max - min < 500 * 1000) {
                        durationUs = currentDurationUs;
                        break;
                    }
                }
            }
        }

        bool found = false;
        for (int i = 0; i < ATSParser::NUM_SOURCE_TYPES; ++i) {
            ATSParser::SourceType type = static_cast<ATSParser::SourceType>(i);
            sp<AnotherPacketSource> impl = mParser->getSource(type);
            if (impl == NULL) {
                continue;
            }

            int64_t trackDurationUs = durationUs;

            status_t err;
            int64_t bufferedDurationUs = impl->getBufferedDurationUs(&err);
            if (err == ERROR_END_OF_STREAM) {
                trackDurationUs = bufferedDurationUs;
            }
            if (trackDurationUs > 0) {
                ALOGV("[SourceType%d] durationUs=%" PRId64 "", type, trackDurationUs);
                const sp<MetaData> meta = impl->getFormat();
                meta->setInt64(kKeyDuration, trackDurationUs);
                impl->setFormat(meta);

                found = true;
            }
        }
        if (!found) {
            estimateDurationsFromTimesUsAtEnd();
        }
    }

    ALOGI("haveAudio=%d, haveVideo=%d, elapsedTime=%" PRId64,
            haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
}

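// Read the next 188-byte TS packet from the data source (skipping any M2TS
// prefix bytes) and feed it to the parser. Sync events produced while still
// in init() are stashed in mLastSyncEvent; afterwards they are recorded
// immediately as seekable sync points.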
status_t MPEG2TSExtractor::feedMore(bool isInit) {
    Mutex::Autolock autoLock(mLock);

    uint8_t packet[kTSPacketSize];
    ssize_t n = mDataSource->readAt(mOffset + mHeaderSkip, packet, kTSPacketSize);

    if (n < (ssize_t)kTSPacketSize) {
        if (n >= 0) {
            mParser->signalEOS(ERROR_END_OF_STREAM);
        }
        return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
    }

    ATSParser::SyncEvent event(mOffset);
    mOffset += mHeaderSkip + n;
    status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
    if (event.hasReturnedData()) {
        if (isInit) {
            mLastSyncEvent = event;
        } else {
            addSyncPoint_l(event);
        }
    }
    return err;
}

void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
    if (!event.hasReturnedData()) {
        return;
    }

    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
        if (mSourceImpls[i].get() == event.getMediaSource().get()) {
            KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
            syncPoints->add(event.getTimeUs(), event.getOffset());
            // Cap the sync point table at 327680 entries (about 5 MB per track);
            // when full, drop 4096 entries from whichever end is farther in time
            // from the newly added sync point.
            size_t size = syncPoints->size();
            if (size >= 327680) {
                int64_t firstTimeUs = syncPoints->keyAt(0);
                int64_t lastTimeUs = syncPoints->keyAt(size - 1);
                if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
                    syncPoints->removeItemsAt(0, 4096);
                } else {
                    syncPoints->removeItemsAt(size - 4096, 4096);
                }
            }
            break;
        }
    }
}

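// Fallback duration estimation for local files when no duration could be
// derived from the sync points: parse a window of packets near the end of the
// file with a fresh ATSParser and derive each track's duration from the last
// timestamps seen there, doubling the window and retrying (up to
// kMaxDurationRetry times) until every audio/video track has a duration.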
status_t MPEG2TSExtractor::estimateDurationsFromTimesUsAtEnd() {
    if (!(mDataSource->flags() & DataSourceBase::kIsLocalFileSource)) {
        return ERROR_UNSUPPORTED;
    }

    off64_t size = 0;
    status_t err = mDataSource->getSize(&size);
    if (err != OK) {
        return err;
    }

    uint8_t packet[kTSPacketSize];
    const off64_t zero = 0;
    off64_t offset = max(zero, size - kMaxDurationReadSize);
    // Zero-length read to verify that the starting offset is readable.
    if (mDataSource->readAt(offset, &packet, 0) < 0) {
        return ERROR_IO;
    }

    int retry = 0;
    bool allDurationsFound = false;
    int64_t timeAnchorUs = mParser->getFirstPTSTimeUs();
    do {
        int bytesRead = 0;
        sp<ATSParser> parser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
        ATSParser::SyncEvent ev(0);
        offset = max(zero, size - (kMaxDurationReadSize << retry));
        offset = (offset / kTSPacketSize) * kTSPacketSize;
        for (;;) {
            if (bytesRead >= kMaxDurationReadSize << max(0, retry - 1)) {
                break;
            }

            ssize_t n = mDataSource->readAt(offset + mHeaderSkip, packet, kTSPacketSize);
            if (n < 0) {
                return n;
            } else if (n < (ssize_t)kTSPacketSize) {
                break;
            }

            offset += kTSPacketSize + mHeaderSkip;
            bytesRead += kTSPacketSize + mHeaderSkip;
            err = parser->feedTSPacket(packet, kTSPacketSize, &ev);
            if (err != OK) {
                return err;
            }

            if (ev.hasReturnedData()) {
                int64_t durationUs = ev.getTimeUs();
                ATSParser::SourceType type = ev.getType();
                ev.reset();

                int64_t firstTimeUs;
                sp<AnotherPacketSource> src = mParser->getSource(type);
                if (src == NULL || src->nextBufferTime(&firstTimeUs) != OK) {
                    continue;
                }
                durationUs += src->getEstimatedBufferDurationUs();
                durationUs -= timeAnchorUs;
                durationUs -= firstTimeUs;
                if (durationUs > 0) {
                    int64_t origDurationUs, lastDurationUs;
                    const sp<MetaData> meta = src->getFormat();
                    const uint32_t kKeyLastDuration = 'ldur';
                    // Only update the duration when it grows and two consecutive
                    // estimates agree within 60 seconds (or when no prior estimate
                    // exists); the previous estimate is stored per stream in its
                    // MetaData.
                    if (!meta->findInt64(kKeyDuration, &origDurationUs)
                            || !meta->findInt64(kKeyLastDuration, &lastDurationUs)
                            || (origDurationUs < durationUs
                                    && abs(durationUs - lastDurationUs) < 60000000)) {
                        meta->setInt64(kKeyDuration, durationUs);
                    }
                    meta->setInt64(kKeyLastDuration, durationUs);
                }
            }
        }

        if (!allDurationsFound) {
            allDurationsFound = true;
            for (auto t : {ATSParser::VIDEO, ATSParser::AUDIO}) {
                sp<AnotherPacketSource> src = mParser->getSource(t);
                if (src == NULL) {
                    continue;
                }
                int64_t durationUs;
                const sp<MetaData> meta = src->getFormat();
                if (!meta->findInt64(kKeyDuration, &durationUs)) {
                    allDurationsFound = false;
                    break;
                }
            }
        }

        ++retry;
    } while (!allDurationsFound && offset > 0 && retry <= kMaxDurationRetry);

    return allDurationsFound ? OK : ERROR_UNSUPPORTED;
}

uint32_t MPEG2TSExtractor::flags() const {
    return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
}

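// Seek to the sync point selected by seekMode, queue a time discontinuity on
// the parser, keep reading forward when the target lies beyond the last known
// sync point, and finally advance every track to its next sync frame.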
status_t MPEG2TSExtractor::seek(int64_t seekTimeUs,
        const MediaTrackHelper::ReadOptions::SeekMode &seekMode) {
    if (mSeekSyncPoints == NULL || mSeekSyncPoints->isEmpty()) {
        ALOGW("No sync point to seek to.");
        // ... and therefore we have nothing useful to do here.
        return OK;
    }

    // Determine whether we're seeking beyond the known area.
    bool shouldSeekBeyond =
            (seekTimeUs > mSeekSyncPoints->keyAt(mSeekSyncPoints->size() - 1));

    // Determine the sync point to seek.
    size_t index = 0;
    for (; index < mSeekSyncPoints->size(); ++index) {
        int64_t timeUs = mSeekSyncPoints->keyAt(index);
        if (timeUs > seekTimeUs) {
            break;
        }
    }

    switch (seekMode) {
        case MediaTrackHelper::ReadOptions::SEEK_NEXT_SYNC:
            if (index == mSeekSyncPoints->size()) {
                ALOGW("Next sync not found; starting from the latest sync.");
                --index;
            }
            break;
        case MediaTrackHelper::ReadOptions::SEEK_CLOSEST_SYNC:
        case MediaTrackHelper::ReadOptions::SEEK_CLOSEST:
            ALOGW("seekMode not supported: %d; falling back to PREVIOUS_SYNC",
                    seekMode);
            FALLTHROUGH_INTENDED;
        case MediaTrackHelper::ReadOptions::SEEK_PREVIOUS_SYNC:
            if (index == 0) {
                ALOGW("Previous sync not found; starting from the earliest sync.");
            } else {
                --index;
            }
            break;
        default:
            return ERROR_UNSUPPORTED;
    }
    if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
        int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
        mOffset = mSeekSyncPoints->valueAt(index);
        status_t err = queueDiscontinuityForSeek(actualSeekTimeUs);
        if (err != OK) {
            return err;
        }
    }

    if (shouldSeekBeyond) {
        status_t err = seekBeyond(seekTimeUs);
        if (err != OK) {
            return err;
        }
    }

    // Fast-forward to sync frame.
    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
        const sp<AnotherPacketSource> &impl = mSourceImpls[i];
        status_t err;
        feedUntilBufferAvailable(impl);
        while (impl->hasBufferAvailable(&err)) {
            sp<AMessage> meta = impl->getMetaAfterLastDequeued(0);
            sp<ABuffer> buffer;
            if (meta == NULL) {
                return UNKNOWN_ERROR;
            }
            int32_t sync;
            if (meta->findInt32("isSync", &sync) && sync) {
                break;
            }
            err = impl->dequeueAccessUnit(&buffer);
            if (err != OK) {
                return err;
            }
            feedUntilBufferAvailable(impl);
        }
    }

    return OK;
}

status_t MPEG2TSExtractor::queueDiscontinuityForSeek(int64_t actualSeekTimeUs) {
    // Signal discontinuity
    sp<AMessage> extra(new AMessage);
    extra->setInt64(kATSParserKeyMediaTimeUs, actualSeekTimeUs);
    mParser->signalDiscontinuity(ATSParser::DISCONTINUITY_TIME, extra);

    // After the discontinuity, each impl should contain only discontinuity
    // buffers, the last being the one we just queued. Dequeue them all here.
    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
        const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
        sp<ABuffer> buffer;
        status_t err;
        while (impl->hasBufferAvailable(&err)) {
            if (err != OK) {
                return err;
            }
            err = impl->dequeueAccessUnit(&buffer);
            // If the source contains anything but discontinuity, that's
            // a programming mistake.
            CHECK(err == INFO_DISCONTINUITY);
        }
    }

    // Feed until we have a buffer for each source.
    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
        const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
        sp<ABuffer> buffer;
        status_t err = feedUntilBufferAvailable(impl);
        if (err != OK) {
            return err;
        }
    }

    return OK;
}

status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
    // If we're seeking beyond the last known sync point, keep reading until
    // we reach the target time.
    size_t syncPointsSize = mSeekSyncPoints->size();

    while (seekTimeUs > mSeekSyncPoints->keyAt(
            mSeekSyncPoints->size() - 1)) {
        status_t err;
        if (syncPointsSize < mSeekSyncPoints->size()) {
            syncPointsSize = mSeekSyncPoints->size();
            int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
            // Dequeue buffers before the sync point to avoid building up
            // too much cached data.
            sp<ABuffer> buffer;
            for (size_t i = 0; i < mSourceImpls.size(); ++i) {
                const sp<AnotherPacketSource> &impl = mSourceImpls[i];
                int64_t timeUs;
                while ((err = impl->nextBufferTime(&timeUs)) == OK) {
                    if (timeUs < syncTimeUs) {
                        impl->dequeueAccessUnit(&buffer);
                    } else {
                        break;
                    }
                }
                if (err != OK && err != -EWOULDBLOCK) {
                    return err;
                }
            }
        }
        if (feedMore() != OK) {
            return ERROR_END_OF_STREAM;
        }
    }

    return OK;
}

status_t MPEG2TSExtractor::feedUntilBufferAvailable(
        const sp<AnotherPacketSource> &impl) {
    status_t finalResult;
    while (!impl->hasBufferAvailable(&finalResult)) {
        if (finalResult != OK) {
            return finalResult;
        }

        status_t err = feedMore();
        if (err != OK) {
            impl->signalEOS(err);
        }
    }
    return OK;
}

////////////////////////////////////////////////////////////////////////////////

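// Sniff for an MPEG-2 transport stream by looking for 0x47 sync bytes at
// 188-byte intervals; if that fails, look for sync bytes at offset 4 of
// 192-byte M2TS packets instead.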
bool SniffMPEG2TS(DataSourceHelper *source, float *confidence) {
    for (int i = 0; i < 5; ++i) {
        char header;
        if (source->readAt(kTSPacketSize * i, &header, 1) != 1
                || header != 0x47) {
            // not a ts file; check whether it's an m2ts file
            for (int j = 0; j < 5; ++j) {
                char headers[5];
                if (source->readAt((kTSPacketSize + 4) * j, &headers, 5) != 5
                        || headers[4] != 0x47) {
                    // not an m2ts file either, return
                    return false;
                }
            }
            ALOGV("this is an m2ts file\n");
            break;
        }
    }

    *confidence = 0.1f;

    return true;
}

}  // namespace android