/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define USE_LOG SLAndroidLogLevel_Verbose

#include "sles_allinclusive.h"
#include "android/android_AudioSfDecoder.h"

#include <binder/IServiceManager.h>
#include <media/stagefright/foundation/ADebug.h>

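// Byte thresholds used by getCacheRemaining() to classify the cache level when the stream
// duration (and therefore a time-based estimate of the remaining data) is unknown.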
#define SIZE_CACHED_HIGH_BYTES 1000000
#define SIZE_CACHED_MED_BYTES 700000
#define SIZE_CACHED_LOW_BYTES 400000

namespace android {

//--------------------------------------------------------------------------------------------------
AudioSfDecoder::AudioSfDecoder(const AudioPlayback_Parameters* params) : GenericPlayer(params),
        mDataSource(0),
        mAudioSource(0),
        mAudioSourceStarted(false),
        mBitrate(-1),
        mDurationUsec(ANDROID_UNKNOWN_TIME),
        mDecodeBuffer(NULL),
        mSeekTimeMsec(0),
        // play event logic depends on the initial time being zero not ANDROID_UNKNOWN_TIME
        mLastDecodedPositionUs(0)
{
    SL_LOGD("AudioSfDecoder::AudioSfDecoder()");
}


AudioSfDecoder::~AudioSfDecoder() {
    SL_LOGD("AudioSfDecoder::~AudioSfDecoder()");
}


void AudioSfDecoder::preDestroy() {
    GenericPlayer::preDestroy();
    SL_LOGD("AudioSfDecoder::preDestroy()");
    {
        Mutex::Autolock _l(mBufferSourceLock);

        if (NULL != mDecodeBuffer) {
            mDecodeBuffer->release();
            mDecodeBuffer = NULL;
        }

        if ((mAudioSource != 0) && mAudioSourceStarted) {
            mAudioSource->stop();
            mAudioSourceStarted = false;
        }
    }
}


//--------------------------------------------------
void AudioSfDecoder::play() {
    SL_LOGD("AudioSfDecoder::play");

    GenericPlayer::play();
    (new AMessage(kWhatDecode, id()))->post();
}


void AudioSfDecoder::getPositionMsec(int* msec) {
    int64_t timeUsec = getPositionUsec();
    if (timeUsec == ANDROID_UNKNOWN_TIME) {
        *msec = ANDROID_UNKNOWN_TIME;
    } else {
        *msec = timeUsec / 1000;
    }
}

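// The getPcmFormat*() accessors below expose the decoded PCM attributes (sample rate, channel
// count, channel mask, bits per sample, container size, endianness) as key/value metadata.
// The values live in mPcmFormatValues and are guarded by mPcmFormatLock, so unlike the event
// handlers further below they may be queried from outside the event loop.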
//--------------------------------------------------
uint32_t AudioSfDecoder::getPcmFormatKeyCount() const {
    return NB_PCMMETADATA_KEYS;
}


//--------------------------------------------------
bool AudioSfDecoder::getPcmFormatKeySize(uint32_t index, uint32_t* pKeySize) {
    if (index >= NB_PCMMETADATA_KEYS) {
        return false;
    } else {
        *pKeySize = strlen(kPcmDecodeMetadataKeys[index]) + 1;
        return true;
    }
}


//--------------------------------------------------
bool AudioSfDecoder::getPcmFormatKeyName(uint32_t index, uint32_t keySize, char* keyName) {
    uint32_t actualKeySize;
    if (!getPcmFormatKeySize(index, &actualKeySize)) {
        return false;
    }
    if (keySize < actualKeySize) {
        return false;
    }
    strncpy(keyName, kPcmDecodeMetadataKeys[index], actualKeySize);
    return true;
}


//--------------------------------------------------
bool AudioSfDecoder::getPcmFormatValueSize(uint32_t index, uint32_t* pValueSize) {
    if (index >= NB_PCMMETADATA_KEYS) {
        *pValueSize = 0;
        return false;
    } else {
        *pValueSize = sizeof(uint32_t);
        return true;
    }
}


//--------------------------------------------------
bool AudioSfDecoder::getPcmFormatKeyValue(uint32_t index, uint32_t size, uint32_t* pValue) {
    uint32_t valueSize = 0;
    if (!getPcmFormatValueSize(index, &valueSize)) {
        return false;
    } else if (size != valueSize) {
        // this ensures we are accessing mPcmFormatValues with a valid size for that index
        SL_LOGE("Error retrieving metadata value at index %d: using size of %d, should be %d",
                index, size, valueSize);
        return false;
    } else {
        android::Mutex::Autolock autoLock(mPcmFormatLock);
        *pValue = mPcmFormatValues[index];
        return true;
    }
}


//--------------------------------------------------
// Event handlers
//  it is strictly verboten to call those methods outside of the event loop

// Initializes the data and audio sources, and updates the PCM format info
// post-condition: upon successful initialization based on the player data locator
//    GenericPlayer::onPrepare() was called
//    mDataSource != 0
//    mAudioSource != 0
//    mAudioSourceStarted == true
// All error returns from this method are via notifyPrepared(status) followed by "return".
void AudioSfDecoder::onPrepare() {
    SL_LOGD("AudioSfDecoder::onPrepare()");
    Mutex::Autolock _l(mBufferSourceLock);

    {
        android::Mutex::Autolock autoLock(mPcmFormatLock);
        // Initialize the PCM format info with the known parameters before the start of the decode
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_BITSPERSAMPLE] = SL_PCMSAMPLEFORMAT_FIXED_16;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CONTAINERSIZE] = 16;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_ENDIANNESS] = SL_BYTEORDER_LITTLEENDIAN;
        // initialization with the default values: they will be replaced by the actual values
        // once the decoder has figured them out
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = UNKNOWN_NUMCHANNELS;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLERATE] = UNKNOWN_SAMPLERATE;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CHANNELMASK] = UNKNOWN_CHANNELMASK;
    }

    //---------------------------------
    // Instantiate and initialize the data source for the decoder
    sp<DataSource> dataSource;

    switch (mDataLocatorType) {

    case kDataLocatorNone:
        SL_LOGE("AudioSfDecoder::onPrepare: no data locator set");
        notifyPrepared(MEDIA_ERROR_BASE);
        return;

    case kDataLocatorUri:
        dataSource = DataSource::CreateFromURI(mDataLocator.uriRef);
        if (dataSource == NULL) {
            SL_LOGE("AudioSfDecoder::onPrepare(): Error opening %s", mDataLocator.uriRef);
            notifyPrepared(MEDIA_ERROR_BASE);
            return;
        }
        break;

    case kDataLocatorFd:
        {
        // As FileSource unconditionally takes ownership of the fd and closes it, we have to
        // dup the fd for FileSource if the app wants to keep ownership itself
        int fd = mDataLocator.fdi.fd;
        if (mDataLocator.fdi.mCloseAfterUse) {
            mDataLocator.fdi.mCloseAfterUse = false;
        } else {
            fd = ::dup(fd);
        }
        dataSource = new FileSource(fd, mDataLocator.fdi.offset, mDataLocator.fdi.length);
        status_t err = dataSource->initCheck();
        if (err != OK) {
            notifyPrepared(err);
            return;
        }
        break;
        }

    // AndroidBufferQueue data source is handled by a subclass,
    // which does not call up to this method. Hence, the missing case.
    default:
        TRESPASS();
    }

    //---------------------------------
    // Instantiate and initialize the decoder attached to the data source
    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
    if (extractor == NULL) {
        SL_LOGE("AudioSfDecoder::onPrepare: Could not instantiate extractor.");
        notifyPrepared(ERROR_UNSUPPORTED);
        return;
    }

    ssize_t audioTrackIndex = -1;
    bool isRawAudio = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!strncasecmp("audio/", mime, 6)) {
            if (isSupportedCodec(mime)) {
                audioTrackIndex = i;

                if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) {
                    isRawAudio = true;
                }
                break;
            }
        }
    }

    if (audioTrackIndex < 0) {
        SL_LOGE("AudioSfDecoder::onPrepare: Could not find a supported audio track.");
        notifyPrepared(ERROR_UNSUPPORTED);
        return;
    }

    sp<MediaSource> source = extractor->getTrack(audioTrackIndex);
    sp<MetaData> meta = source->getFormat();

    // we can't trust the OMXCodec (if there is one) to issue an INFO_FORMAT_CHANGED, so we want
    // to have some meaningful values as soon as possible.
    int32_t channelCount;
    bool hasChannelCount = meta->findInt32(kKeyChannelCount, &channelCount);
    int32_t sr;
    bool hasSampleRate = meta->findInt32(kKeySampleRate, &sr);

    // first compute the duration
    off64_t size;
    int64_t durationUs;
    int32_t durationMsec;
    if (dataSource->getSize(&size) == OK
            && meta->findInt64(kKeyDuration, &durationUs)) {
        if (durationUs != 0) {
            mBitrate = size * 8000000ll / durationUs;  // in bits/sec
        } else {
            mBitrate = -1;
        }
        mDurationUsec = durationUs;
        durationMsec = durationUs / 1000;
    } else {
        mBitrate = -1;
        mDurationUsec = ANDROID_UNKNOWN_TIME;
        durationMsec = ANDROID_UNKNOWN_TIME;
    }

    // then assign the duration under the settings lock
    {
        Mutex::Autolock _l(mSettingsLock);
        mDurationMsec = durationMsec;
    }

    // the audio content is not raw PCM, so we need a decoder
    if (!isRawAudio) {
        OMXClient client;
        CHECK_EQ(client.connect(), (status_t)OK);

        source = OMXCodec::Create(
                client.interface(), meta, false /* createEncoder */,
                source);

        if (source == NULL) {
            SL_LOGE("AudioSfDecoder::onPrepare: Could not instantiate decoder.");
            notifyPrepared(ERROR_UNSUPPORTED);
            return;
        }

        meta = source->getFormat();
    }


    if (source->start() != OK) {
        SL_LOGE("AudioSfDecoder::onPrepare: Failed to start source/decoder.");
        notifyPrepared(MEDIA_ERROR_BASE);
        return;
    }

    //---------------------------------
    // The data source, and audio source (a decoder if required) are ready to be used
    mDataSource = dataSource;
    mAudioSource = source;
    mAudioSourceStarted = true;

    if (!hasChannelCount) {
        CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
    }

    if (!hasSampleRate) {
        CHECK(meta->findInt32(kKeySampleRate, &sr));
    }
    // FIXME add code below once channel mask support is in, currently initialized to default
    //       value computed from the channel count
    //    if (!hasChannelMask) {
    //        CHECK(meta->findInt32(kKeyChannelMask, &channelMask));
    //    }

    if (!wantPrefetch()) {
        SL_LOGV("AudioSfDecoder::onPrepare: no need to prefetch");
        // doesn't need prefetching, notify good to go
        mCacheStatus = kStatusHigh;
        mCacheFill = 1000;
        notifyStatus();
        notifyCacheFill();
    }

    {
        android::Mutex::Autolock autoLock(mPcmFormatLock);
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLERATE] = sr;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = channelCount;
        mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CHANNELMASK] =
                channelCountToMask(channelCount);
    }

    // at this point we have enough information about the source to create the sink that
    // will consume the data
    createAudioSink();

    // signal successful completion of prepare
    mStateFlags |= kFlagPrepared;

    GenericPlayer::onPrepare();
    SL_LOGD("AudioSfDecoder::onPrepare() done, mStateFlags=0x%x", mStateFlags);
}


void AudioSfDecoder::onPause() {
    SL_LOGV("AudioSfDecoder::onPause()");
    GenericPlayer::onPause();
    pauseAudioSink();
}


void AudioSfDecoder::onPlay() {
    SL_LOGV("AudioSfDecoder::onPlay()");
    GenericPlayer::onPlay();
    startAudioSink();
}


void AudioSfDecoder::onSeek(const sp<AMessage> &msg) {
    SL_LOGV("AudioSfDecoder::onSeek");
    int64_t timeMsec;
    CHECK(msg->findInt64(WHATPARAM_SEEK_SEEKTIME_MS, &timeMsec));

    Mutex::Autolock _l(mTimeLock);
    mStateFlags |= kFlagSeeking;
    mSeekTimeMsec = timeMsec;
    // don't set mLastDecodedPositionUs to ANDROID_UNKNOWN_TIME; getPositionUsec
    // ignores mLastDecodedPositionUs while seeking, and substitutes the seek goal instead

    // nop for now
    GenericPlayer::onSeek(msg);
}


void AudioSfDecoder::onLoop(const sp<AMessage> &msg) {
    SL_LOGV("AudioSfDecoder::onLoop");
    int32_t loop;
    CHECK(msg->findInt32(WHATPARAM_LOOP_LOOPING, &loop));

    if (loop) {
        //SL_LOGV("AudioSfDecoder::onLoop start looping");
        mStateFlags |= kFlagLooping;
    } else {
        //SL_LOGV("AudioSfDecoder::onLoop stop looping");
        mStateFlags &= ~kFlagLooping;
    }

    // nop for now
    GenericPlayer::onLoop(msg);
}

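// Checks the prefetch cache level: once it is high enough (or EOS has been reached), buffering
// ends, and the audio sink is restarted and decoding resumes if we are playing; otherwise the
// check is re-posted 100 ms later.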
void AudioSfDecoder::onCheckCache(const sp<AMessage> &msg) {
    //SL_LOGV("AudioSfDecoder::onCheckCache");
    bool eos;
    CacheStatus_t status = getCacheRemaining(&eos);

    if (eos || status == kStatusHigh
            || ((mStateFlags & kFlagPreparing) && (status >= kStatusEnough))) {
        if (mStateFlags & kFlagPlaying) {
            startAudioSink();
        }
        mStateFlags &= ~kFlagBuffering;

        SL_LOGV("AudioSfDecoder::onCheckCache: buffering done.");

        if (mStateFlags & kFlagPreparing) {
            //SL_LOGV("AudioSfDecoder::onCheckCache: preparation done.");
            mStateFlags &= ~kFlagPreparing;
        }

        if (mStateFlags & kFlagPlaying) {
            (new AMessage(kWhatDecode, id()))->post();
        }
        return;
    }

    msg->post(100000);
}

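// Reads one buffer from the audio source (the OMX decoder, or the extractor track for raw PCM),
// updates the last decoded position, and posts kWhatRender on success so the buffer is consumed.
// Also drops into a buffering state when the prefetch cache runs low, and handles end of stream,
// format changes and discontinuities reported by the source.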
void AudioSfDecoder::onDecode() {
    SL_LOGV("AudioSfDecoder::onDecode");

    //-------------------------------- Need to buffer some more before decoding?
    bool eos;
    if (mDataSource == 0) {
        // application set play state to paused, which failed, then set play state to playing
        return;
    }

    if (wantPrefetch()
            && (getCacheRemaining(&eos) == kStatusLow)
            && !eos) {
        SL_LOGV("buffering more.");

        if (mStateFlags & kFlagPlaying) {
            pauseAudioSink();
        }
        mStateFlags |= kFlagBuffering;
        (new AMessage(kWhatCheckCache, id()))->post(100000);
        return;
    }

    if (!(mStateFlags & (kFlagPlaying | kFlagBuffering | kFlagPreparing))) {
        // don't decode if we're not buffering, prefetching or playing
        //SL_LOGV("don't decode: not buffering, prefetching or playing");
        return;
    }

    //-------------------------------- Decode
    status_t err;
    MediaSource::ReadOptions readOptions;
    if (mStateFlags & kFlagSeeking) {
        assert(mSeekTimeMsec != ANDROID_UNKNOWN_TIME);
        readOptions.setSeekTo(mSeekTimeMsec * 1000);
    }

    int64_t timeUsec = ANDROID_UNKNOWN_TIME;
    {
        Mutex::Autolock _l(mBufferSourceLock);

        if (NULL != mDecodeBuffer) {
            // the current decoded buffer hasn't been rendered, drop it
            mDecodeBuffer->release();
            mDecodeBuffer = NULL;
        }
        if (!mAudioSourceStarted) {
            return;
        }
        err = mAudioSource->read(&mDecodeBuffer, &readOptions);
        if (err == OK) {
            // FIXME workaround apparent bug in AAC decoder: kKeyTime is 3 frames old if length is 0
            if (mDecodeBuffer->range_length() == 0) {
                timeUsec = ANDROID_UNKNOWN_TIME;
            } else {
                CHECK(mDecodeBuffer->meta_data()->findInt64(kKeyTime, &timeUsec));
            }
        } else {
            // errors are handled below
        }
    }

    {
        Mutex::Autolock _l(mTimeLock);
        if (mStateFlags & kFlagSeeking) {
            mStateFlags &= ~kFlagSeeking;
            mSeekTimeMsec = ANDROID_UNKNOWN_TIME;
        }
        if (timeUsec != ANDROID_UNKNOWN_TIME) {
            // Note that though we've decoded this position, we haven't rendered it yet.
            // So a GetPosition called after this point will observe the advanced position,
            // even though the PCM may not have been supplied to the sink. That's OK as
            // we don't claim to provide AAC frame-accurate (let alone sample-accurate) GetPosition.
            mLastDecodedPositionUs = timeUsec;
        }
    }

    //-------------------------------- Handle return of decode
    if (err != OK) {
        bool continueDecoding = false;
        switch (err) {
            case ERROR_END_OF_STREAM:
                if (0 < mDurationUsec) {
                    Mutex::Autolock _l(mTimeLock);
                    mLastDecodedPositionUs = mDurationUsec;
                }
                // handle notification and looping at end of stream
                if (mStateFlags & kFlagPlaying) {
                    notify(PLAYEREVENT_ENDOFSTREAM, 1, true /*async*/);
                }
                if (mStateFlags & kFlagLooping) {
                    seek(0);
                    // kick-off decoding again
                    continueDecoding = true;
                }
                break;
            case INFO_FORMAT_CHANGED:
                SL_LOGD("MediaSource::read encountered INFO_FORMAT_CHANGED");
                // reconfigure output
                {
                    Mutex::Autolock _l(mBufferSourceLock);
                    hasNewDecodeParams();
                }
                continueDecoding = true;
                break;
            case INFO_DISCONTINUITY:
                SL_LOGD("MediaSource::read encountered INFO_DISCONTINUITY");
                continueDecoding = true;
                break;
            default:
                SL_LOGE("MediaSource::read returned error %d", err);
                break;
        }
        if (continueDecoding) {
            if (NULL == mDecodeBuffer) {
                (new AMessage(kWhatDecode, id()))->post();
                return;
            }
        } else {
            return;
        }
    }

    //-------------------------------- Render
    sp<AMessage> msg = new AMessage(kWhatRender, id());
    msg->post();

}


void AudioSfDecoder::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatDecode:
            onDecode();
            break;

        case kWhatRender:
            onRender();
            break;

        case kWhatCheckCache:
            onCheckCache(msg);
            break;

        default:
            GenericPlayer::onMessageReceived(msg);
            break;
    }
}

//--------------------------------------------------
// Prepared state, prefetch status notifications
void AudioSfDecoder::notifyPrepared(status_t prepareRes) {
    assert(!(mStateFlags & (kFlagPrepared | kFlagPreparedUnsuccessfully)));
    if (NO_ERROR == prepareRes) {
        // The "then" fork is not currently used, but is kept here to make it easier
        // to replace with a new signalPrepareCompletion(status) if we revisit this later.
        mStateFlags |= kFlagPrepared;
    } else {
        mStateFlags |= kFlagPreparedUnsuccessfully;
    }
    // Do not call the superclass onPrepare to notify, because it uses a default error
    // status code but we can provide a more specific one.
    // GenericPlayer::onPrepare();
    notify(PLAYEREVENT_PREPARED, (int32_t)prepareRes, true /*async*/);
    SL_LOGD("AudioSfDecoder::onPrepare() done, mStateFlags=0x%x", mStateFlags);
}


void AudioSfDecoder::onNotify(const sp<AMessage> &msg) {
    notif_cbf_t notifyClient;
    void* notifyUser;
    {
        android::Mutex::Autolock autoLock(mNotifyClientLock);
        if (NULL == mNotifyClient) {
            return;
        } else {
            notifyClient = mNotifyClient;
            notifyUser = mNotifyUser;
        }
    }
    int32_t val;
    if (msg->findInt32(PLAYEREVENT_PREFETCHSTATUSCHANGE, &val)) {
        SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_PREFETCHSTATUSCHANGE, val);
        notifyClient(kEventPrefetchStatusChange, val, 0, notifyUser);
    }
    else if (msg->findInt32(PLAYEREVENT_PREFETCHFILLLEVELUPDATE, &val)) {
        SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_PREFETCHFILLLEVELUPDATE, val);
        notifyClient(kEventPrefetchFillLevelUpdate, val, 0, notifyUser);
    }
    else if (msg->findInt32(PLAYEREVENT_ENDOFSTREAM, &val)) {
        SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_ENDOFSTREAM, val);
        notifyClient(kEventEndOfStream, val, 0, notifyUser);
    }
    else {
        GenericPlayer::onNotify(msg);
    }
}


//--------------------------------------------------
// Private utility functions

bool AudioSfDecoder::wantPrefetch() {
    if (mDataSource != 0) {
        return (mDataSource->flags() & DataSource::kWantsPrefetching);
    } else {
        // happens if an improper data locator was passed, if the media extractor couldn't be
        // initialized, if there is no audio track in the media, if the OMX decoder couldn't be
        // instantiated, if the source couldn't be opened, or if the MediaSource
        // couldn't be started
        SL_LOGV("AudioSfDecoder::wantPrefetch() tries to access NULL mDataSource");
        return false;
    }
}


int64_t AudioSfDecoder::getPositionUsec() {
    Mutex::Autolock _l(mTimeLock);
    if (mStateFlags & kFlagSeeking) {
        return mSeekTimeMsec * 1000;
    } else {
        return mLastDecodedPositionUs;
    }
}

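// Returns the prefetch cache status and refreshes mCacheFill, notifying listeners on changes.
// Note: only meaningful when the data source wants prefetching (see wantPrefetch()), as the
// static_cast below assumes mDataSource is a NuCachedSource2.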
CacheStatus_t AudioSfDecoder::getCacheRemaining(bool *eos) {
    sp<NuCachedSource2> cachedSource =
            static_cast<NuCachedSource2 *>(mDataSource.get());

    CacheStatus_t oldStatus = mCacheStatus;

    status_t finalStatus;
    size_t dataRemaining = cachedSource->approxDataRemaining(&finalStatus);
    *eos = (finalStatus != OK);

    CHECK_GE(mBitrate, 0);

    int64_t dataRemainingUs = dataRemaining * 8000000ll / mBitrate;
    //SL_LOGV("AudioSfDecoder::getCacheRemaining: approx %.2f secs remaining (eos=%d)",
    //        dataRemainingUs / 1E6, *eos);

    if (*eos) {
        // data is buffered up to the end of the stream; it can't get any better than this
        mCacheStatus = kStatusHigh;
        mCacheFill = 1000;

    } else {
        if (mDurationUsec > 0) {
            // known duration:

            //   fill level is the ratio of how much has been played plus how much is
            //   cached, divided by the total duration
            int64_t currentPositionUsec = getPositionUsec();
            if (currentPositionUsec == ANDROID_UNKNOWN_TIME) {
                // if we don't know where we are, assume the worst for the fill ratio
                currentPositionUsec = 0;
            }
            if (mDurationUsec > 0) {
                mCacheFill = (int16_t) ((1000.0
                        * (double)(currentPositionUsec + dataRemainingUs) / mDurationUsec));
            } else {
                mCacheFill = 0;
            }
            //SL_LOGV("cacheFill = %d", mCacheFill);

            //   cache status is evaluated against duration thresholds
            if (dataRemainingUs > DURATION_CACHED_HIGH_MS*1000) {
                mCacheStatus = kStatusHigh;
                //ALOGV("high");
            } else if (dataRemainingUs > DURATION_CACHED_MED_MS*1000) {
                //ALOGV("enough");
                mCacheStatus = kStatusEnough;
            } else if (dataRemainingUs < DURATION_CACHED_LOW_MS*1000) {
                //ALOGV("low");
                mCacheStatus = kStatusLow;
            } else {
                mCacheStatus = kStatusIntermediate;
            }

        } else {
            // unknown duration:

            //   cache status is evaluated against cache amount thresholds
            //   (no duration, so we don't have the bitrate either; could be derived from format?)
            if (dataRemaining > SIZE_CACHED_HIGH_BYTES) {
                mCacheStatus = kStatusHigh;
            } else if (dataRemaining > SIZE_CACHED_MED_BYTES) {
                mCacheStatus = kStatusEnough;
            } else if (dataRemaining < SIZE_CACHED_LOW_BYTES) {
                mCacheStatus = kStatusLow;
            } else {
                mCacheStatus = kStatusIntermediate;
            }
        }

    }

    if (oldStatus != mCacheStatus) {
        notifyStatus();
    }

    if (abs(mCacheFill - mLastNotifiedCacheFill) > mCacheFillNotifThreshold) {
        notifyCacheFill();
    }

    return mCacheStatus;
}

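// Refreshes the PCM format values from the audio source's (possibly changed) output format and
// updates the audio sink accordingly; called from onDecode(), with mBufferSourceLock held, when
// the source reports INFO_FORMAT_CHANGED.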
void AudioSfDecoder::hasNewDecodeParams() {

    if ((mAudioSource != 0) && mAudioSourceStarted) {
        sp<MetaData> meta = mAudioSource->getFormat();

        int32_t channelCount;
        CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
        int32_t sr;
        CHECK(meta->findInt32(kKeySampleRate, &sr));

        // FIXME similar to onPrepare()
        {
            android::Mutex::Autolock autoLock(mPcmFormatLock);
            SL_LOGV("format changed: old sr=%d, channels=%d; new sr=%d, channels=%d",
                    mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLERATE],
                    mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS],
                    sr, channelCount);
            mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = channelCount;
            mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLERATE] = sr;
            mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CHANNELMASK] =
                    channelCountToMask(channelCount);
        }
        // there's no need to do a notify of PLAYEREVENT_CHANNEL_COUNT,
        // because the only listener is for volume updates, and decoders don't support that
    }

    // alert users of those params
    updateAudioSink();
}

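// MIME types that require the ALLOW_ANY_CODEC_FOR_PLAYBACK permission; see isSupportedCodec().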
static const char* const kPlaybackOnlyCodecs[] = { MEDIA_MIMETYPE_AUDIO_AMR_NB,
        MEDIA_MIMETYPE_AUDIO_AMR_WB };
#define NB_PLAYBACK_ONLY_CODECS (sizeof(kPlaybackOnlyCodecs)/sizeof(kPlaybackOnlyCodecs[0]))

bool AudioSfDecoder::isSupportedCodec(const char* mime) {
    bool codecRequiresPermission = false;
    for (unsigned int i = 0 ; i < NB_PLAYBACK_ONLY_CODECS ; i++) {
        if (!strcasecmp(mime, kPlaybackOnlyCodecs[i])) {
            codecRequiresPermission = true;
            break;
        }
    }
    if (codecRequiresPermission) {
        // verify only the system can decode, for playback only
        return checkCallingPermission(
                String16("android.permission.ALLOW_ANY_CODEC_FOR_PLAYBACK"));
    } else {
        return true;
    }
}

} // namespace android