1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "AWakeLock.h"
22 #include "NuPlayerRenderer.h"
23 #include <algorithm>
24 #include <cutils/properties.h>
25 #include <media/stagefright/foundation/ADebug.h>
26 #include <media/stagefright/foundation/AMessage.h>
27 #include <media/stagefright/foundation/AUtils.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaCodecConstants.h>
30 #include <media/stagefright/MediaDefs.h>
31 #include <media/stagefright/MediaErrors.h>
32 #include <media/stagefright/MetaData.h>
33 #include <media/stagefright/Utils.h>
34 #include <media/stagefright/VideoFrameScheduler.h>
35 #include <media/MediaCodecBuffer.h>
36 #include <utils/SystemClock.h>
37 
38 #include <inttypes.h>
39 
40 namespace android {
41 
42 /*
43  * Example of common configuration settings in shell script form
44 
45    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
46    adb shell setprop audio.offload.disable 1
47 
48    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
49    adb shell setprop audio.offload.video 1
50 
51    #Use audio callbacks for PCM data
52    adb shell setprop media.stagefright.audio.cbk 1
53 
54    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
55    adb shell setprop media.stagefright.audio.deep 1
56 
57    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
58    adb shell setprop media.stagefright.audio.sink 1000
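   #Example check: read a property back to confirm its current value
   adb shell getprop media.stagefright.audio.sink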
59 
60  * These configurations take effect for the next track played (not the current track).
61  */
62 
63 static inline bool getUseAudioCallbackSetting() {
64     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
65 }
66 
67 static inline int32_t getAudioSinkPcmMsSetting() {
68     return property_get_int32(
69             "media.stagefright.audio.sink", 500 /* default_value */);
70 }
71 
72 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
73 // is closed to allow the audio DSP to power down.
74 static const int64_t kOffloadPauseMaxUs = 10000000LL;
75 
76 // Additional delay after teardown before releasing the wake lock to allow time for the audio path
77 // to be completely released
78 static const int64_t kWakelockReleaseDelayUs = 2000000LL;
79 
80 // Maximum allowed delay from AudioSink, 1.5 seconds.
81 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
82 
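   // Minimum interval between media clock anchor updates driven by audio timestamps.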
83 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
84 
85 // Default video frame display duration when only video exists.
86 // Used to set max media time in MediaClock.
87 static const int64_t kDefaultVideoFrameIntervalUs = 100000LL;
88 
89 // static
90 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
91         AUDIO_CHANNEL_NONE,
92         AUDIO_OUTPUT_FLAG_NONE,
93         AUDIO_FORMAT_INVALID,
94         0, // mNumChannels
95         0 // mSampleRate
96 };
97 
98 // static
99 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
100 
101 static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
102     switch (pcmEncoding) {
103     case kAudioEncodingPcmFloat:
104         return AUDIO_FORMAT_PCM_FLOAT;
105     case kAudioEncodingPcm16bit:
106         return AUDIO_FORMAT_PCM_16_BIT;
107     case kAudioEncodingPcm8bit:
108         return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
109     default:
110         ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
111         return AUDIO_FORMAT_INVALID;
112     }
113 }
114 
115 NuPlayer::Renderer::Renderer(
116         const sp<MediaPlayerBase::AudioSink> &sink,
117         const sp<MediaClock> &mediaClock,
118         const sp<AMessage> &notify,
119         uint32_t flags)
120     : mAudioSink(sink),
121       mUseVirtualAudioSink(false),
122       mNotify(notify),
123       mFlags(flags),
124       mNumFramesWritten(0),
125       mDrainAudioQueuePending(false),
126       mDrainVideoQueuePending(false),
127       mAudioQueueGeneration(0),
128       mVideoQueueGeneration(0),
129       mAudioDrainGeneration(0),
130       mVideoDrainGeneration(0),
131       mAudioEOSGeneration(0),
132       mMediaClock(mediaClock),
133       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
134       mAudioFirstAnchorTimeMediaUs(-1),
135       mAnchorTimeMediaUs(-1),
136       mAnchorNumFramesWritten(-1),
137       mVideoLateByUs(0LL),
138       mNextVideoTimeMediaUs(-1),
139       mHasAudio(false),
140       mHasVideo(false),
141       mNotifyCompleteAudio(false),
142       mNotifyCompleteVideo(false),
143       mSyncQueues(false),
144       mPaused(false),
145       mPauseDrainAudioAllowedUs(0),
146       mVideoSampleReceived(false),
147       mVideoRenderingStarted(false),
148       mVideoRenderingStartGeneration(0),
149       mAudioRenderingStartGeneration(0),
150       mRenderingDataDelivered(false),
151       mNextAudioClockUpdateTimeUs(-1),
152       mLastAudioMediaTimeUs(-1),
153       mAudioOffloadPauseTimeoutGeneration(0),
154       mAudioTornDown(false),
155       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
156       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
157       mTotalBuffersQueued(0),
158       mLastAudioBufferDrained(0),
159       mUseAudioCallback(false),
160       mWakeLock(new AWakeLock()) {
161     CHECK(mediaClock != NULL);
162     mPlaybackRate = mPlaybackSettings.mSpeed;
163     mMediaClock->setPlaybackRate(mPlaybackRate);
164     (void)mSyncFlag.test_and_set();
165 }
166 
167 NuPlayer::Renderer::~Renderer() {
168     if (offloadingAudio()) {
169         mAudioSink->stop();
170         mAudioSink->flush();
171         mAudioSink->close();
172     }
173 
174     // Try to avoid a race condition in case the callback is still active.
175     Mutex::Autolock autoLock(mLock);
176     if (mUseAudioCallback) {
177         flushQueue(&mAudioQueue);
178         flushQueue(&mVideoQueue);
179     }
180     mWakeLock.clear();
181     mVideoScheduler.clear();
182     mNotify.clear();
183     mAudioSink.clear();
184 }
185 
186 void NuPlayer::Renderer::queueBuffer(
187         bool audio,
188         const sp<MediaCodecBuffer> &buffer,
189         const sp<AMessage> &notifyConsumed) {
190     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
191     msg->setInt32("queueGeneration", getQueueGeneration(audio));
192     msg->setInt32("audio", static_cast<int32_t>(audio));
193     msg->setObject("buffer", buffer);
194     msg->setMessage("notifyConsumed", notifyConsumed);
195     msg->post();
196 }
197 
198 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
199     CHECK_NE(finalResult, (status_t)OK);
200 
201     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
202     msg->setInt32("queueGeneration", getQueueGeneration(audio));
203     msg->setInt32("audio", static_cast<int32_t>(audio));
204     msg->setInt32("finalResult", finalResult);
205     msg->post();
206 }
207 
208 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
209     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
210     writeToAMessage(msg, rate);
211     sp<AMessage> response;
212     status_t err = msg->postAndAwaitResponse(&response);
213     if (err == OK && response != NULL) {
214         CHECK(response->findInt32("err", &err));
215     }
216     return err;
217 }
218 
219 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
220     if (rate.mSpeed == 0.f) {
221         onPause();
222         // Don't call the audiosink's setPlaybackRate when pausing, as the pitch does
223         // not have to correspond to any non-zero speed (e.g. the old speed). Keep the
224         // settings nonetheless, using the old speed, in case the audiosink changes.
225         AudioPlaybackRate newRate = rate;
226         newRate.mSpeed = mPlaybackSettings.mSpeed;
227         mPlaybackSettings = newRate;
228         return OK;
229     }
230 
231     if (mAudioSink != NULL && mAudioSink->ready()) {
232         status_t err = mAudioSink->setPlaybackRate(rate);
233         if (err != OK) {
234             return err;
235         }
236     }
237     mPlaybackSettings = rate;
238     mPlaybackRate = rate.mSpeed;
239     mMediaClock->setPlaybackRate(mPlaybackRate);
240     return OK;
241 }
242 
243 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
244     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
245     sp<AMessage> response;
246     status_t err = msg->postAndAwaitResponse(&response);
247     if (err == OK && response != NULL) {
248         CHECK(response->findInt32("err", &err));
249         if (err == OK) {
250             readFromAMessage(response, rate);
251         }
252     }
253     return err;
254 }
255 
256 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
257     if (mAudioSink != NULL && mAudioSink->ready()) {
258         status_t err = mAudioSink->getPlaybackRate(rate);
259         if (err == OK) {
260             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
261                 ALOGW("correcting mismatch in internal/external playback rate");
262             }
263             // get the playback settings used by the audiosink, as they may be
264             // slightly off because the audiosink does not apply very small changes.
265             mPlaybackSettings = *rate;
266             if (mPaused) {
267                 rate->mSpeed = 0.f;
268             }
269         }
270         return err;
271     }
272     *rate = mPlaybackSettings;
273     return OK;
274 }
275 
276 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
277     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
278     writeToAMessage(msg, sync, videoFpsHint);
279     sp<AMessage> response;
280     status_t err = msg->postAndAwaitResponse(&response);
281     if (err == OK && response != NULL) {
282         CHECK(response->findInt32("err", &err));
283     }
284     return err;
285 }
286 
287 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
288     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
289         return BAD_VALUE;
290     }
291     // TODO: support sync sources
292     return INVALID_OPERATION;
293 }
294 
295 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
296     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
297     sp<AMessage> response;
298     status_t err = msg->postAndAwaitResponse(&response);
299     if (err == OK && response != NULL) {
300         CHECK(response->findInt32("err", &err));
301         if (err == OK) {
302             readFromAMessage(response, sync, videoFps);
303         }
304     }
305     return err;
306 }
307 
308 status_t NuPlayer::Renderer::onGetSyncSettings(
309         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
310     *sync = mSyncSettings;
311     *videoFps = -1.f;
312     return OK;
313 }
314 
315 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
316     {
317         Mutex::Autolock autoLock(mLock);
318         if (audio) {
319             mNotifyCompleteAudio |= notifyComplete;
320             clearAudioFirstAnchorTime_l();
321             ++mAudioQueueGeneration;
322             ++mAudioDrainGeneration;
323         } else {
324             mNotifyCompleteVideo |= notifyComplete;
325             ++mVideoQueueGeneration;
326             ++mVideoDrainGeneration;
327             mNextVideoTimeMediaUs = -1;
328         }
329 
330         mMediaClock->clearAnchor();
331         mVideoLateByUs = 0;
332         mSyncQueues = false;
333     }
334 
335     // Wait until the current job in the message queue is done, to make sure
336     // buffer processing from the old generation is finished. After the current
337     // job is finished, access to buffers is protected by generation.
338     Mutex::Autolock syncLock(mSyncLock);
339     int64_t syncCount = mSyncCount;
340     mSyncFlag.clear();
341 
342     // Make sure message queue is not empty after mSyncFlag is cleared.
343     sp<AMessage> msg = new AMessage(kWhatFlush, this);
344     msg->setInt32("audio", static_cast<int32_t>(audio));
345     msg->post();
346 
347     int64_t uptimeMs = uptimeMillis();
348     while (mSyncCount == syncCount) {
349         (void)mSyncCondition.waitRelative(mSyncLock, ms2ns(1000));
350         if (uptimeMillis() - uptimeMs > 1000) {
351             ALOGW("flush(): no wake-up from sync point for 1s; stop waiting to "
352                   "prevent being stuck indefinitely.");
353             break;
354         }
355     }
356 }
357 
358 void NuPlayer::Renderer::signalTimeDiscontinuity() {
359 }
360 
361 void NuPlayer::Renderer::signalDisableOffloadAudio() {
362     (new AMessage(kWhatDisableOffloadAudio, this))->post();
363 }
364 
365 void NuPlayer::Renderer::signalEnableOffloadAudio() {
366     (new AMessage(kWhatEnableOffloadAudio, this))->post();
367 }
368 
369 void NuPlayer::Renderer::pause() {
370     (new AMessage(kWhatPause, this))->post();
371 }
372 
373 void NuPlayer::Renderer::resume() {
374     (new AMessage(kWhatResume, this))->post();
375 }
376 
377 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
378     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
379     msg->setFloat("frame-rate", fps);
380     msg->post();
381 }
382 
383 // Called on any thread, without mLock acquired.
384 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
385     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
386     if (result == OK) {
387         return result;
388     }
389 
390     // MediaClock has not started yet. Try to start it if possible.
391     {
392         Mutex::Autolock autoLock(mLock);
393         if (mAudioFirstAnchorTimeMediaUs == -1) {
394             return result;
395         }
396 
397         AudioTimestamp ts;
398         status_t res = mAudioSink->getTimestamp(ts);
399         if (res != OK) {
400             return result;
401         }
402 
403         // AudioSink has rendered some frames.
404         int64_t nowUs = ALooper::GetNowUs();
405         int64_t playedOutDurationUs = mAudioSink->getPlayedOutDurationUs(nowUs);
406         if (playedOutDurationUs == 0) {
407             *mediaUs = mAudioFirstAnchorTimeMediaUs;
408             return OK;
409         }
410         int64_t nowMediaUs = playedOutDurationUs + mAudioFirstAnchorTimeMediaUs;
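            // Illustrative figures (not from this code): if the sink has played out 250000 us
            // and the first audio anchor was at 1000000 us, the media time is ~1250000 us.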
411         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
412     }
413 
414     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
415 }
416 
417 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
418     mAudioFirstAnchorTimeMediaUs = -1;
419     mMediaClock->setStartingTimeMedia(-1);
420 }
421 
422 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
423     if (mAudioFirstAnchorTimeMediaUs == -1) {
424         mAudioFirstAnchorTimeMediaUs = mediaUs;
425         mMediaClock->setStartingTimeMedia(mediaUs);
426     }
427 }
428 
429 // Called on renderer looper.
430 void NuPlayer::Renderer::clearAnchorTime() {
431     mMediaClock->clearAnchor();
432     mAnchorTimeMediaUs = -1;
433     mAnchorNumFramesWritten = -1;
434 }
435 
436 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
437     Mutex::Autolock autoLock(mLock);
438     mVideoLateByUs = lateUs;
439 }
440 
441 int64_t NuPlayer::Renderer::getVideoLateByUs() {
442     Mutex::Autolock autoLock(mLock);
443     return mVideoLateByUs;
444 }
445 
446 status_t NuPlayer::Renderer::openAudioSink(
447         const sp<AMessage> &format,
448         bool offloadOnly,
449         bool hasVideo,
450         uint32_t flags,
451         bool *isOffloaded,
452         bool isStreaming) {
453     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
454     msg->setMessage("format", format);
455     msg->setInt32("offload-only", offloadOnly);
456     msg->setInt32("has-video", hasVideo);
457     msg->setInt32("flags", flags);
458     msg->setInt32("isStreaming", isStreaming);
459 
460     sp<AMessage> response;
461     status_t postStatus = msg->postAndAwaitResponse(&response);
462 
463     int32_t err;
464     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
465         err = INVALID_OPERATION;
466     } else if (err == OK && isOffloaded != NULL) {
467         int32_t offload;
468         CHECK(response->findInt32("offload", &offload));
469         *isOffloaded = (offload != 0);
470     }
471     return err;
472 }
473 
474 void NuPlayer::Renderer::closeAudioSink() {
475     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
476 
477     sp<AMessage> response;
478     msg->postAndAwaitResponse(&response);
479 }
480 
481 void NuPlayer::Renderer::changeAudioFormat(
482         const sp<AMessage> &format,
483         bool offloadOnly,
484         bool hasVideo,
485         uint32_t flags,
486         bool isStreaming,
487         const sp<AMessage> &notify) {
488     sp<AMessage> meta = new AMessage;
489     meta->setMessage("format", format);
490     meta->setInt32("offload-only", offloadOnly);
491     meta->setInt32("has-video", hasVideo);
492     meta->setInt32("flags", flags);
493     meta->setInt32("isStreaming", isStreaming);
494 
495     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
496     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
497     msg->setMessage("notify", notify);
498     msg->setMessage("meta", meta);
499     msg->post();
500 }
501 
502 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
503     switch (msg->what()) {
504         case kWhatOpenAudioSink:
505         {
506             sp<AMessage> format;
507             CHECK(msg->findMessage("format", &format));
508 
509             int32_t offloadOnly;
510             CHECK(msg->findInt32("offload-only", &offloadOnly));
511 
512             int32_t hasVideo;
513             CHECK(msg->findInt32("has-video", &hasVideo));
514 
515             uint32_t flags;
516             CHECK(msg->findInt32("flags", (int32_t *)&flags));
517 
518             uint32_t isStreaming;
519             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
520 
521             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
522 
523             sp<AMessage> response = new AMessage;
524             response->setInt32("err", err);
525             response->setInt32("offload", offloadingAudio());
526 
527             sp<AReplyToken> replyID;
528             CHECK(msg->senderAwaitsResponse(&replyID));
529             response->postReply(replyID);
530 
531             break;
532         }
533 
534         case kWhatCloseAudioSink:
535         {
536             sp<AReplyToken> replyID;
537             CHECK(msg->senderAwaitsResponse(&replyID));
538 
539             onCloseAudioSink();
540 
541             sp<AMessage> response = new AMessage;
542             response->postReply(replyID);
543             break;
544         }
545 
546         case kWhatStopAudioSink:
547         {
548             mAudioSink->stop();
549             break;
550         }
551 
552         case kWhatChangeAudioFormat:
553         {
554             int32_t queueGeneration;
555             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
556 
557             sp<AMessage> notify;
558             CHECK(msg->findMessage("notify", &notify));
559 
560             if (offloadingAudio()) {
561                 ALOGW("changeAudioFormat should NOT be called in offload mode");
562                 notify->setInt32("err", INVALID_OPERATION);
563                 notify->post();
564                 break;
565             }
566 
567             sp<AMessage> meta;
568             CHECK(msg->findMessage("meta", &meta));
569 
570             if (queueGeneration != getQueueGeneration(true /* audio */)
571                     || mAudioQueue.empty()) {
572                 onChangeAudioFormat(meta, notify);
573                 break;
574             }
575 
576             QueueEntry entry;
577             entry.mNotifyConsumed = notify;
578             entry.mMeta = meta;
579 
580             Mutex::Autolock autoLock(mLock);
581             mAudioQueue.push_back(entry);
582             postDrainAudioQueue_l();
583 
584             break;
585         }
586 
587         case kWhatDrainAudioQueue:
588         {
589             mDrainAudioQueuePending = false;
590 
591             int32_t generation;
592             CHECK(msg->findInt32("drainGeneration", &generation));
593             if (generation != getDrainGeneration(true /* audio */)) {
594                 break;
595             }
596 
597             if (onDrainAudioQueue()) {
598                 uint32_t numFramesPlayed;
599                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
600                          (status_t)OK);
601 
602                 // Handle AudioTrack race when start is immediately called after flush.
603                 uint32_t numFramesPendingPlayout =
604                     (mNumFramesWritten > numFramesPlayed ?
605                         mNumFramesWritten - numFramesPlayed : 0);
606 
607                 // This is how long the audio sink will have data to
608                 // play back.
609                 int64_t delayUs =
610                     mAudioSink->msecsPerFrame()
611                         * numFramesPendingPlayout * 1000LL;
612                 if (mPlaybackRate > 1.0f) {
613                     delayUs /= mPlaybackRate;
614                 }
615 
616                 // Let's give it more data after about half that time
617                 // has elapsed.
618                 delayUs /= 2;
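                    // Illustrative figures (not from this code): with 44.1 kHz stereo 16-bit PCM,
                    // msecsPerFrame() is ~0.0227 ms, so ~8820 pending frames yield delayUs of
                    // about 200000 us, and the halved reschedule delay is roughly 100 ms.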
619                 // check the buffer size to estimate maximum delay permitted.
620                 const int64_t maxDrainDelayUs = std::max(
621                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
622                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
623                         (long long)delayUs, (long long)maxDrainDelayUs);
624                 Mutex::Autolock autoLock(mLock);
625                 postDrainAudioQueue_l(delayUs);
626             }
627             break;
628         }
629 
630         case kWhatDrainVideoQueue:
631         {
632             int32_t generation;
633             CHECK(msg->findInt32("drainGeneration", &generation));
634             if (generation != getDrainGeneration(false /* audio */)) {
635                 break;
636             }
637 
638             mDrainVideoQueuePending = false;
639 
640             onDrainVideoQueue();
641 
642             postDrainVideoQueue();
643             break;
644         }
645 
646         case kWhatPostDrainVideoQueue:
647         {
648             int32_t generation;
649             CHECK(msg->findInt32("drainGeneration", &generation));
650             if (generation != getDrainGeneration(false /* audio */)) {
651                 break;
652             }
653 
654             mDrainVideoQueuePending = false;
655             postDrainVideoQueue();
656             break;
657         }
658 
659         case kWhatQueueBuffer:
660         {
661             onQueueBuffer(msg);
662             break;
663         }
664 
665         case kWhatQueueEOS:
666         {
667             onQueueEOS(msg);
668             break;
669         }
670 
671         case kWhatEOS:
672         {
673             int32_t generation;
674             CHECK(msg->findInt32("audioEOSGeneration", &generation));
675             if (generation != mAudioEOSGeneration) {
676                 break;
677             }
678             status_t finalResult;
679             CHECK(msg->findInt32("finalResult", &finalResult));
680             notifyEOS(true /* audio */, finalResult);
681             break;
682         }
683 
684         case kWhatConfigPlayback:
685         {
686             sp<AReplyToken> replyID;
687             CHECK(msg->senderAwaitsResponse(&replyID));
688             AudioPlaybackRate rate;
689             readFromAMessage(msg, &rate);
690             status_t err = onConfigPlayback(rate);
691             sp<AMessage> response = new AMessage;
692             response->setInt32("err", err);
693             response->postReply(replyID);
694             break;
695         }
696 
697         case kWhatGetPlaybackSettings:
698         {
699             sp<AReplyToken> replyID;
700             CHECK(msg->senderAwaitsResponse(&replyID));
701             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
702             status_t err = onGetPlaybackSettings(&rate);
703             sp<AMessage> response = new AMessage;
704             if (err == OK) {
705                 writeToAMessage(response, rate);
706             }
707             response->setInt32("err", err);
708             response->postReply(replyID);
709             break;
710         }
711 
712         case kWhatConfigSync:
713         {
714             sp<AReplyToken> replyID;
715             CHECK(msg->senderAwaitsResponse(&replyID));
716             AVSyncSettings sync;
717             float videoFpsHint;
718             readFromAMessage(msg, &sync, &videoFpsHint);
719             status_t err = onConfigSync(sync, videoFpsHint);
720             sp<AMessage> response = new AMessage;
721             response->setInt32("err", err);
722             response->postReply(replyID);
723             break;
724         }
725 
726         case kWhatGetSyncSettings:
727         {
728             sp<AReplyToken> replyID;
729             CHECK(msg->senderAwaitsResponse(&replyID));
730 
731             ALOGV("kWhatGetSyncSettings");
732             AVSyncSettings sync;
733             float videoFps = -1.f;
734             status_t err = onGetSyncSettings(&sync, &videoFps);
735             sp<AMessage> response = new AMessage;
736             if (err == OK) {
737                 writeToAMessage(response, sync, videoFps);
738             }
739             response->setInt32("err", err);
740             response->postReply(replyID);
741             break;
742         }
743 
744         case kWhatFlush:
745         {
746             onFlush(msg);
747             break;
748         }
749 
750         case kWhatDisableOffloadAudio:
751         {
752             onDisableOffloadAudio();
753             break;
754         }
755 
756         case kWhatEnableOffloadAudio:
757         {
758             onEnableOffloadAudio();
759             break;
760         }
761 
762         case kWhatPause:
763         {
764             onPause();
765             break;
766         }
767 
768         case kWhatResume:
769         {
770             onResume();
771             break;
772         }
773 
774         case kWhatSetVideoFrameRate:
775         {
776             float fps;
777             CHECK(msg->findFloat("frame-rate", &fps));
778             onSetVideoFrameRate(fps);
779             break;
780         }
781 
782         case kWhatAudioTearDown:
783         {
784             int32_t reason;
785             CHECK(msg->findInt32("reason", &reason));
786 
787             onAudioTearDown((AudioTearDownReason)reason);
788             break;
789         }
790 
791         case kWhatAudioOffloadPauseTimeout:
792         {
793             int32_t generation;
794             CHECK(msg->findInt32("drainGeneration", &generation));
795             if (generation != mAudioOffloadPauseTimeoutGeneration) {
796                 break;
797             }
798             ALOGV("Audio Offload tear down due to pause timeout.");
799             onAudioTearDown(kDueToTimeout);
800             sp<AMessage> newMsg = new AMessage(kWhatReleaseWakeLock, this);
801             newMsg->setInt32("drainGeneration", generation);
802             newMsg->post(kWakelockReleaseDelayUs);
803             break;
804         }
805 
806         case kWhatReleaseWakeLock:
807         {
808             int32_t generation;
809             CHECK(msg->findInt32("drainGeneration", &generation));
810             if (generation != mAudioOffloadPauseTimeoutGeneration) {
811                 break;
812             }
813             ALOGV("releasing audio offload pause wakelock.");
814             mWakeLock->release();
815             break;
816         }
817 
818         default:
819             TRESPASS();
820             break;
821     }
822     if (!mSyncFlag.test_and_set()) {
823         Mutex::Autolock syncLock(mSyncLock);
824         ++mSyncCount;
825         mSyncCondition.broadcast();
826     }
827 }
828 
829 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
830     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
831         return;
832     }
833 
834     if (mAudioQueue.empty()) {
835         return;
836     }
837 
838     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
839     if (mPaused) {
840         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
841         if (diffUs > delayUs) {
842             delayUs = diffUs;
843         }
844     }
845 
846     mDrainAudioQueuePending = true;
847     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
848     msg->setInt32("drainGeneration", mAudioDrainGeneration);
849     msg->post(delayUs);
850 }
851 
852 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
853     mAudioRenderingStartGeneration = mAudioDrainGeneration;
854     mVideoRenderingStartGeneration = mVideoDrainGeneration;
855     mRenderingDataDelivered = false;
856 }
857 
858 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
859     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
860         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
861         mRenderingDataDelivered = true;
862         if (mPaused) {
863             return;
864         }
865         mVideoRenderingStartGeneration = -1;
866         mAudioRenderingStartGeneration = -1;
867 
868         sp<AMessage> notify = mNotify->dup();
869         notify->setInt32("what", kWhatMediaRenderingStart);
870         notify->post();
871     }
872 }
873 
874 // static
875 size_t NuPlayer::Renderer::AudioSinkCallback(
876         MediaPlayerBase::AudioSink * /* audioSink */,
877         void *buffer,
878         size_t size,
879         void *cookie,
880         MediaPlayerBase::AudioSink::cb_event_t event) {
881     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
882 
883     switch (event) {
884         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
885         {
886             return me->fillAudioBuffer(buffer, size);
887             break;
888         }
889 
890         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
891         {
892             ALOGV("AudioSink::CB_EVENT_STREAM_END");
893             me->notifyEOSCallback();
894             break;
895         }
896 
897         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
898         {
899             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
900             me->notifyAudioTearDown(kDueToError);
901             break;
902         }
903     }
904 
905     return 0;
906 }
907 
908 void NuPlayer::Renderer::notifyEOSCallback() {
909     Mutex::Autolock autoLock(mLock);
910 
911     if (!mUseAudioCallback) {
912         return;
913     }
914 
915     notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
916 }
917 
918 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
919     Mutex::Autolock autoLock(mLock);
920 
921     if (!mUseAudioCallback) {
922         return 0;
923     }
924 
925     bool hasEOS = false;
926 
927     size_t sizeCopied = 0;
928     bool firstEntry = true;
929     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
930     while (sizeCopied < size && !mAudioQueue.empty()) {
931         entry = &*mAudioQueue.begin();
932 
933         if (entry->mBuffer == NULL) { // EOS
934             hasEOS = true;
935             mAudioQueue.erase(mAudioQueue.begin());
936             break;
937         }
938 
939         if (firstEntry && entry->mOffset == 0) {
940             firstEntry = false;
941             int64_t mediaTimeUs;
942             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
943             if (mediaTimeUs < 0) {
944                 ALOGD("fillAudioBuffer: reset negative media time %.2f secs to zero",
945                        mediaTimeUs / 1E6);
946                 mediaTimeUs = 0;
947             }
948             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
949             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
950         }
951 
952         size_t copy = entry->mBuffer->size() - entry->mOffset;
953         size_t sizeRemaining = size - sizeCopied;
954         if (copy > sizeRemaining) {
955             copy = sizeRemaining;
956         }
957 
958         memcpy((char *)buffer + sizeCopied,
959                entry->mBuffer->data() + entry->mOffset,
960                copy);
961 
962         entry->mOffset += copy;
963         if (entry->mOffset == entry->mBuffer->size()) {
964             entry->mNotifyConsumed->post();
965             mAudioQueue.erase(mAudioQueue.begin());
966             entry = NULL;
967         }
968         sizeCopied += copy;
969 
970         notifyIfMediaRenderingStarted_l();
971     }
972 
973     if (mAudioFirstAnchorTimeMediaUs >= 0) {
974         int64_t nowUs = ALooper::GetNowUs();
975         int64_t nowMediaUs =
976             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
977         // we don't know how much data we are queueing for offloaded tracks.
978         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
979     }
980 
981     // for non-offloaded audio, we need to compute the frames written because
982     // there is no EVENT_STREAM_END notification. The frames written gives
983     // an estimate on the pending played out duration.
984     if (!offloadingAudio()) {
985         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
986     }
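            // For example (illustrative values): with 16-bit stereo PCM, frameSize() is 4 bytes,
            // so copying 4096 bytes advances mNumFramesWritten by 1024 frames.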
987 
988     if (hasEOS) {
989         (new AMessage(kWhatStopAudioSink, this))->post();
990         // As there is currently no EVENT_STREAM_END callback notification for
991         // non-offloaded audio tracks, we need to post the EOS ourselves.
992         if (!offloadingAudio()) {
993             int64_t postEOSDelayUs = 0;
994             if (mAudioSink->needsTrailingPadding()) {
995                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
996             }
997             ALOGV("fillAudioBuffer: notifyEOS_l "
998                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
999                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
1000             notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1001         }
1002     }
1003     return sizeCopied;
1004 }
1005 
1006 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
1007     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
1008     bool foundEOS = false;
1009     while (it != mAudioQueue.end()) {
1010         int32_t eos;
1011         QueueEntry *entry = &*it++;
1012         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
1013                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
1014             itEOS = it;
1015             foundEOS = true;
1016         }
1017     }
1018 
1019     if (foundEOS) {
1020         // post all replies before EOS and drop the samples
1021         for (it = mAudioQueue.begin(); it != itEOS; it++) {
1022             if (it->mBuffer == nullptr) {
1023                 if (it->mNotifyConsumed == nullptr) {
1024                     // delay doesn't matter as we don't even have an AudioTrack
1025                     notifyEOS(true /* audio */, it->mFinalResult);
1026                 } else {
1027                     // TAG for re-opening audio sink.
1028                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
1029                 }
1030             } else {
1031                 it->mNotifyConsumed->post();
1032             }
1033         }
1034         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
1035     }
1036 }
1037 
1038 bool NuPlayer::Renderer::onDrainAudioQueue() {
1039     // do not drain audio during teardown as queued buffers may be invalid.
1040     if (mAudioTornDown) {
1041         return false;
1042     }
1043     // TODO: This call to getPosition checks if AudioTrack has been created
1044     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
1045     // CHECKs on getPosition will fail.
1046     // We still need to figure out why AudioTrack is not created when
1047     // this function is called. One possible reason could be leftover
1048     // audio. Another place to check is whether the decoder has received
1049     // INFO_FORMAT_CHANGED as its first buffer, since AudioSink is opened
1050     // there, as well as possible interactions with flush immediately
1051     // after start. Investigate error message
1052     // "vorbis_dsp_synthesis returned -135", along with RTSP.
1053     uint32_t numFramesPlayed;
1054     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
1055         // When getPosition fails, renderer will not reschedule the draining
1056         // unless new samples are queued.
1057         // If we have pending EOS (or "eos" marker for discontinuities), we need
1058         // to post these now as NuPlayerDecoder might be waiting for it.
1059         drainAudioQueueUntilLastEOS();
1060 
1061         ALOGW("onDrainAudioQueue(): audio sink is not ready");
1062         return false;
1063     }
1064 
1065 #if 0
1066     ssize_t numFramesAvailableToWrite =
1067         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
1068 
1069     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
1070         ALOGI("audio sink underrun");
1071     } else {
1072         ALOGV("audio queue has %d frames left to play",
1073              mAudioSink->frameCount() - numFramesAvailableToWrite);
1074     }
1075 #endif
1076 
1077     uint32_t prevFramesWritten = mNumFramesWritten;
1078     while (!mAudioQueue.empty()) {
1079         QueueEntry *entry = &*mAudioQueue.begin();
1080 
1081         if (entry->mBuffer == NULL) {
1082             if (entry->mNotifyConsumed != nullptr) {
1083                 // TAG for re-open audio sink.
1084                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1085                 mAudioQueue.erase(mAudioQueue.begin());
1086                 continue;
1087             }
1088 
1089             // EOS
1090             if (mPaused) {
1091                 // Do not notify EOS when paused.
1092                 // This is needed to avoid switching to the next clip while paused.
1093                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1094                 return false;
1095             }
1096 
1097             int64_t postEOSDelayUs = 0;
1098             if (mAudioSink->needsTrailingPadding()) {
1099                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1100             }
1101             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1102             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1103 
1104             mAudioQueue.erase(mAudioQueue.begin());
1105             entry = NULL;
1106             if (mAudioSink->needsTrailingPadding()) {
1107                 // If we're not in gapless playback (i.e. through setNextPlayer), we
1108                 // need to stop the track here, because that will play out the last
1109                 // little bit at the end of the file. Otherwise short files won't play.
1110                 mAudioSink->stop();
1111                 mNumFramesWritten = 0;
1112             }
1113             return false;
1114         }
1115 
1116         mLastAudioBufferDrained = entry->mBufferOrdinal;
1117 
1118         // ignore a 0-sized buffer, which could be an EOS marker with no data
1119         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1120             int64_t mediaTimeUs;
1121             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1122             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1123                     mediaTimeUs / 1E6);
1124             onNewAudioMediaTime(mediaTimeUs);
1125         }
1126 
1127         size_t copy = entry->mBuffer->size() - entry->mOffset;
1128 
1129         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1130                                             copy, false /* blocking */);
1131         if (written < 0) {
1132             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1133             if (written == WOULD_BLOCK) {
1134                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1135             } else {
1136                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1137                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1138                 // true, in which case the NuPlayer will handle the reconnect.
1139                 notifyAudioTearDown(kDueToError);
1140             }
1141             break;
1142         }
1143 
1144         entry->mOffset += written;
1145         size_t remainder = entry->mBuffer->size() - entry->mOffset;
1146         if ((ssize_t)remainder < mAudioSink->frameSize()) {
1147             if (remainder > 0) {
1148                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1149                         remainder);
1150                 entry->mOffset += remainder;
1151                 copy -= remainder;
1152             }
1153 
1154             entry->mNotifyConsumed->post();
1155             mAudioQueue.erase(mAudioQueue.begin());
1156 
1157             entry = NULL;
1158         }
1159 
1160         size_t copiedFrames = written / mAudioSink->frameSize();
1161         mNumFramesWritten += copiedFrames;
1162 
1163         {
1164             Mutex::Autolock autoLock(mLock);
1165             int64_t maxTimeMedia;
1166             maxTimeMedia =
1167                 mAnchorTimeMediaUs +
1168                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1169                                 * 1000LL * mAudioSink->msecsPerFrame());
1170             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
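                 // Worked example (assumed figures): 3000 frames written past the anchor at
                 // 48 kHz (msecsPerFrame() ~0.0208 ms) extend maxTimeMedia by about 62500 us.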
1171 
1172             notifyIfMediaRenderingStarted_l();
1173         }
1174 
1175         if (written != (ssize_t)copy) {
1176             // A short count was received from AudioSink::write()
1177             //
1178             // AudioSink write is called in non-blocking mode.
1179             // It may return with a short count when:
1180             //
1181             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1182             //    discarded.
1183             // 2) The data to be copied exceeds the available buffer in AudioSink.
1184             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1185             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1186 
1187             // (Case 1)
1188             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1189             // needs to fail, as we should not carry over fractional frames between calls.
1190             CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1191 
1192             // (Case 2, 3, 4)
1193             // Return early to the caller.
1194             // Beware of calling immediately again as this may busy-loop if you are not careful.
1195             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1196             break;
1197         }
1198     }
1199 
1200     // calculate whether we need to reschedule another write.
1201     bool reschedule = !mAudioQueue.empty()
1202             && (!mPaused
1203                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1204     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1205     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1206     return reschedule;
1207 }
1208 
1209 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1210     int32_t sampleRate = offloadingAudio() ?
1211             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1212     if (sampleRate == 0) {
1213         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1214         return 0;
1215     }
1216 
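         // Example with assumed values: 24000 frames at a 48000 Hz sample rate map to 500000 us.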
1217     return (int64_t)(numFrames * 1000000LL / sampleRate);
1218 }
1219 
1220 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1221 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1222     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1223     if (mUseVirtualAudioSink) {
1224         int64_t nowUs = ALooper::GetNowUs();
1225         int64_t mediaUs;
1226         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1227             return 0LL;
1228         } else {
1229             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1230         }
1231     }
1232 
1233     const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1234     int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
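         // Illustrative figures: 800000 us written and 650000 us already played out leave
         // roughly 150000 us of audio pending in the sink.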
1235     if (pendingUs < 0) {
1236         // This shouldn't happen unless the timestamp is stale.
1237         ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1238                 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1239                 __func__, (long long)pendingUs,
1240                 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1241         pendingUs = 0;
1242     }
1243     return pendingUs;
1244 }
1245 
1246 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1247     int64_t realUs;
1248     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1249         // If failed to get current position, e.g. due to audio clock is
1250         // not ready, then just play out video immediately without delay.
1251         return nowUs;
1252     }
1253     return realUs;
1254 }
1255 
1256 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1257     Mutex::Autolock autoLock(mLock);
1258     // TRICKY: vorbis decoder generates multiple frames with the same
1259     // timestamp, so only update on the first frame with a given timestamp
1260     if (mediaTimeUs == mAnchorTimeMediaUs) {
1261         return;
1262     }
1263     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1264 
1265     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1266     if (mNextAudioClockUpdateTimeUs == -1) {
1267         AudioTimestamp ts;
1268         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1269             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1270         }
1271     }
1272     int64_t nowUs = ALooper::GetNowUs();
1273     if (mNextAudioClockUpdateTimeUs >= 0) {
1274         if (nowUs >= mNextAudioClockUpdateTimeUs) {
1275             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1276             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1277             mUseVirtualAudioSink = false;
1278             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1279         }
1280     } else {
1281         int64_t unused;
1282         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1283                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1284                         > kMaxAllowedAudioSinkDelayUs)) {
1285             // Enough data has been sent to AudioSink, but AudioSink has not rendered
1286             // any data yet. Something is wrong with AudioSink, e.g., the device is not
1287             // connected to audio out.
1288             // Switch to the system clock. This essentially creates a virtual AudioSink with
1289             // an initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1290             // This virtual AudioSink renders audio data starting from the very first sample
1291             // and is paced by the system clock.
1292             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1293             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1294             mUseVirtualAudioSink = true;
1295         }
1296     }
1297     mAnchorNumFramesWritten = mNumFramesWritten;
1298     mAnchorTimeMediaUs = mediaTimeUs;
1299 }
1300 
1301 // Called without mLock acquired.
1302 void NuPlayer::Renderer::postDrainVideoQueue() {
1303     if (mDrainVideoQueuePending
1304             || getSyncQueues()
1305             || (mPaused && mVideoSampleReceived)) {
1306         return;
1307     }
1308 
1309     if (mVideoQueue.empty()) {
1310         return;
1311     }
1312 
1313     QueueEntry &entry = *mVideoQueue.begin();
1314 
1315     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1316     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1317 
1318     if (entry.mBuffer == NULL) {
1319         // EOS doesn't carry a timestamp.
1320         msg->post();
1321         mDrainVideoQueuePending = true;
1322         return;
1323     }
1324 
1325     int64_t nowUs = ALooper::GetNowUs();
1326     if (mFlags & FLAG_REAL_TIME) {
1327         int64_t realTimeUs;
1328         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1329 
1330         realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1331 
1332         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
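             // e.g. on a 60 Hz display (assumed refresh rate) the vsync period is ~16667 us,
             // so twoVsyncsUs is ~33333 us and frames are posted about two refreshes early.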
1333 
1334         int64_t delayUs = realTimeUs - nowUs;
1335 
1336         ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
1337         // post 2 display refreshes before rendering is due
1338         msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1339 
1340         mDrainVideoQueuePending = true;
1341         return;
1342     }
1343 
1344     int64_t mediaTimeUs;
1345     CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1346 
1347     {
1348         Mutex::Autolock autoLock(mLock);
1349         if (mAnchorTimeMediaUs < 0) {
1350             mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1351             mAnchorTimeMediaUs = mediaTimeUs;
1352         }
1353     }
1354     mNextVideoTimeMediaUs = mediaTimeUs;
1355     if (!mHasAudio) {
1356         // smooth out videos >= 10fps
1357         mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
1358     }
1359 
1360     if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
1361         msg->post();
1362     } else {
1363         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1364 
1365         // post 2 display refreshes before rendering is due
1366         mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
1367     }
1368 
1369     mDrainVideoQueuePending = true;
1370 }
1371 
1372 void NuPlayer::Renderer::onDrainVideoQueue() {
1373     if (mVideoQueue.empty()) {
1374         return;
1375     }
1376 
1377     QueueEntry *entry = &*mVideoQueue.begin();
1378 
1379     if (entry->mBuffer == NULL) {
1380         // EOS
1381 
1382         notifyEOS(false /* audio */, entry->mFinalResult);
1383 
1384         mVideoQueue.erase(mVideoQueue.begin());
1385         entry = NULL;
1386 
1387         setVideoLateByUs(0);
1388         return;
1389     }
1390 
1391     int64_t nowUs = ALooper::GetNowUs();
1392     int64_t realTimeUs;
1393     int64_t mediaTimeUs = -1;
1394     if (mFlags & FLAG_REAL_TIME) {
1395         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1396     } else {
1397         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1398 
1399         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1400     }
1401     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1402 
1403     bool tooLate = false;
1404 
1405     if (!mPaused) {
1406         setVideoLateByUs(nowUs - realTimeUs);
1407         tooLate = (mVideoLateByUs > 40000);
1408 
1409         if (tooLate) {
1410             ALOGV("video late by %lld us (%.2f secs)",
1411                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1412         } else {
1413             int64_t mediaUs = 0;
1414             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1415             ALOGV("rendering video at media time %.2f secs",
1416                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1417                     mediaUs) / 1E6);
1418 
1419             if (!(mFlags & FLAG_REAL_TIME)
1420                     && mLastAudioMediaTimeUs != -1
1421                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1422                 // If audio ends before video, video continues to drive media clock.
1423                 // Also smooth out videos >= 10fps.
1424                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
1425             }
1426         }
1427     } else {
1428         setVideoLateByUs(0);
1429         if (!mVideoSampleReceived && !mHasAudio) {
1430             // This ensures that the first frame after a flush won't be used as an anchor
1431             // while the renderer is paused, because resume can happen at any time after a seek.
1432             clearAnchorTime();
1433         }
1434     }
1435 
1436     // Always render the first video frame while keeping stats on A/V sync.
1437     if (!mVideoSampleReceived) {
1438         realTimeUs = nowUs;
1439         tooLate = false;
1440     }
1441 
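    // Hand the buffer back to the decoder with the target render time; render == false means
    // the frame is released without being displayed.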
1442     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
1443     entry->mNotifyConsumed->setInt32("render", !tooLate);
1444     entry->mNotifyConsumed->post();
1445     mVideoQueue.erase(mVideoQueue.begin());
1446     entry = NULL;
1447 
1448     mVideoSampleReceived = true;
1449 
1450     if (!mPaused) {
1451         if (!mVideoRenderingStarted) {
1452             mVideoRenderingStarted = true;
1453             notifyVideoRenderingStart();
1454         }
1455         Mutex::Autolock autoLock(mLock);
1456         notifyIfMediaRenderingStarted_l();
1457     }
1458 }
1459 
1460 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1461     sp<AMessage> notify = mNotify->dup();
1462     notify->setInt32("what", kWhatVideoRenderingStart);
1463     notify->post();
1464 }
1465 
1466 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1467     Mutex::Autolock autoLock(mLock);
1468     notifyEOS_l(audio, finalResult, delayUs);
1469 }
1470 
1471 void NuPlayer::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
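    // A positive delay for audio EOS is handled by reposting kWhatEOS, tagged with the current
    // audio EOS generation (which is bumped on flush).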
1472     if (audio && delayUs > 0) {
1473         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1474         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1475         msg->setInt32("finalResult", finalResult);
1476         msg->post(delayUs);
1477         return;
1478     }
1479     sp<AMessage> notify = mNotify->dup();
1480     notify->setInt32("what", kWhatEOS);
1481     notify->setInt32("audio", static_cast<int32_t>(audio));
1482     notify->setInt32("finalResult", finalResult);
1483     notify->post(delayUs);
1484 
1485     if (audio) {
1486         // Video might outlive audio. Clear the anchor to enable the video-only case.
1487         mAnchorTimeMediaUs = -1;
1488         mHasAudio = false;
1489         if (mNextVideoTimeMediaUs >= 0) {
1490             int64_t mediaUs = 0;
1491             int64_t nowUs = ALooper::GetNowUs();
1492             status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
1493             if (result == OK) {
1494                 if (mNextVideoTimeMediaUs > mediaUs) {
1495                     mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
1496                 }
1497             } else {
1498                 mMediaClock->updateAnchor(
1499                         mNextVideoTimeMediaUs, nowUs,
1500                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1501             }
1502         }
1503     }
1504 }
1505 
1506 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1507     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1508     msg->setInt32("reason", reason);
1509     msg->post();
1510 }
1511 
1512 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1513     int32_t audio;
1514     CHECK(msg->findInt32("audio", &audio));
1515 
1516     if (dropBufferIfStale(audio, msg)) {
1517         return;
1518     }
1519 
1520     if (audio) {
1521         mHasAudio = true;
1522     } else {
1523         mHasVideo = true;
1524     }
1525 
1526     if (mHasVideo) {
1527         if (mVideoScheduler == NULL) {
1528             mVideoScheduler = new VideoFrameScheduler();
1529             mVideoScheduler->init();
1530         }
1531     }
1532 
1533     sp<RefBase> obj;
1534     CHECK(msg->findObject("buffer", &obj));
1535     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1536 
1537     sp<AMessage> notifyConsumed;
1538     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1539 
1540     QueueEntry entry;
1541     entry.mBuffer = buffer;
1542     entry.mNotifyConsumed = notifyConsumed;
1543     entry.mOffset = 0;
1544     entry.mFinalResult = OK;
1545     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1546 
1547     if (audio) {
1548         Mutex::Autolock autoLock(mLock);
1549         mAudioQueue.push_back(entry);
1550         postDrainAudioQueue_l();
1551     } else {
1552         mVideoQueue.push_back(entry);
1553         postDrainVideoQueue();
1554     }
1555 
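    // When queues are being synced, wait until both have data, then align the starting
    // timestamps by dropping leading audio that begins well before the first video frame.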
1556     Mutex::Autolock autoLock(mLock);
1557     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1558         return;
1559     }
1560 
1561     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1562     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1563 
1564     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1565         // EOS signalled on either queue.
1566         syncQueuesDone_l();
1567         return;
1568     }
1569 
1570     int64_t firstAudioTimeUs;
1571     int64_t firstVideoTimeUs;
1572     CHECK(firstAudioBuffer->meta()
1573             ->findInt64("timeUs", &firstAudioTimeUs));
1574     CHECK(firstVideoBuffer->meta()
1575             ->findInt64("timeUs", &firstVideoTimeUs));
1576 
1577     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1578 
1579     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1580 
1581     if (diff > 100000LL) {
1582         // Audio data starts more than 0.1 secs before video.
1583         // Drop some audio.
1584 
1585         (*mAudioQueue.begin()).mNotifyConsumed->post();
1586         mAudioQueue.erase(mAudioQueue.begin());
1587         return;
1588     }
1589 
1590     syncQueuesDone_l();
1591 }
1592 
1593 void NuPlayer::Renderer::syncQueuesDone_l() {
1594     if (!mSyncQueues) {
1595         return;
1596     }
1597 
1598     mSyncQueues = false;
1599 
1600     if (!mAudioQueue.empty()) {
1601         postDrainAudioQueue_l();
1602     }
1603 
1604     if (!mVideoQueue.empty()) {
1605         mLock.unlock();
1606         postDrainVideoQueue();
1607         mLock.lock();
1608     }
1609 }
1610 
1611 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1612     int32_t audio;
1613     CHECK(msg->findInt32("audio", &audio));
1614 
1615     if (dropBufferIfStale(audio, msg)) {
1616         return;
1617     }
1618 
1619     int32_t finalResult;
1620     CHECK(msg->findInt32("finalResult", &finalResult));
1621 
1622     QueueEntry entry;
1623     entry.mOffset = 0;
1624     entry.mFinalResult = finalResult;
1625 
1626     if (audio) {
1627         Mutex::Autolock autoLock(mLock);
1628         if (mAudioQueue.empty() && mSyncQueues) {
1629             syncQueuesDone_l();
1630         }
1631         mAudioQueue.push_back(entry);
1632         postDrainAudioQueue_l();
1633     } else {
1634         if (mVideoQueue.empty() && getSyncQueues()) {
1635             Mutex::Autolock autoLock(mLock);
1636             syncQueuesDone_l();
1637         }
1638         mVideoQueue.push_back(entry);
1639         postDrainVideoQueue();
1640     }
1641 }
1642 
1643 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1644     int32_t audio, notifyComplete;
1645     CHECK(msg->findInt32("audio", &audio));
1646 
1647     {
1648         Mutex::Autolock autoLock(mLock);
1649         if (audio) {
1650             notifyComplete = mNotifyCompleteAudio;
1651             mNotifyCompleteAudio = false;
1652             mLastAudioMediaTimeUs = -1;
1653 
1654             mHasAudio = false;
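            // With audio flushed, re-anchor the media clock at the next video frame so that
            // video keeps driving playback.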
1655             if (mNextVideoTimeMediaUs >= 0) {
1656                 int64_t nowUs = ALooper::GetNowUs();
1657                 mMediaClock->updateAnchor(
1658                         mNextVideoTimeMediaUs, nowUs,
1659                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1660             }
1661         } else {
1662             notifyComplete = mNotifyCompleteVideo;
1663             mNotifyCompleteVideo = false;
1664         }
1665 
1666         // If we're currently syncing the queues, i.e. dropping audio while
1667         // aligning the first audio/video buffer times and only one of the
1668         // two queues has data, we may starve that queue by not requesting
1669         // more buffers from the decoder. If the other source then encounters
1670         // a discontinuity that leads to flushing, we'll never find the
1671         // corresponding discontinuity on the other queue.
1672         // Therefore we'll stop syncing the queues if at least one of them
1673         // is flushed.
1674         syncQueuesDone_l();
1675     }
1676     clearAnchorTime();
1677 
1678     ALOGV("flushing %s", audio ? "audio" : "video");
1679     if (audio) {
1680         {
1681             Mutex::Autolock autoLock(mLock);
1682             flushQueue(&mAudioQueue);
1683 
1684             ++mAudioDrainGeneration;
1685             ++mAudioEOSGeneration;
1686             prepareForMediaRenderingStart_l();
1687 
1688             // the frame count will be reset after flush.
1689             clearAudioFirstAnchorTime_l();
1690         }
1691 
1692         mDrainAudioQueuePending = false;
1693 
1694         mAudioSink->pause();
1695         mAudioSink->flush();
1696         if (!offloadingAudio()) {
1697             // Call stop() to signal to the AudioSink to completely fill the
1698             // internal buffer before resuming playback.
1699             // FIXME: this is ignored after flush().
1700             mAudioSink->stop();
1701             mNumFramesWritten = 0;
1702         }
1703         if (!mPaused) {
1704             mAudioSink->start();
1705         }
1706         mNextAudioClockUpdateTimeUs = -1;
1707     } else {
1708         flushQueue(&mVideoQueue);
1709 
1710         mDrainVideoQueuePending = false;
1711 
1712         if (mVideoScheduler != NULL) {
1713             mVideoScheduler->restart();
1714         }
1715 
1716         Mutex::Autolock autoLock(mLock);
1717         ++mVideoDrainGeneration;
1718         prepareForMediaRenderingStart_l();
1719     }
1720 
1721     mVideoSampleReceived = false;
1722 
1723     if (notifyComplete) {
1724         notifyFlushComplete(audio);
1725     }
1726 }
1727 
1728 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
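    // Return every queued buffer to the decoder unrendered. A buffer-less entry that still
    // carries a notifyConsumed message represents a pending audio format change and is applied here.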
1729     while (!queue->empty()) {
1730         QueueEntry *entry = &*queue->begin();
1731 
1732         if (entry->mBuffer != NULL) {
1733             entry->mNotifyConsumed->post();
1734         } else if (entry->mNotifyConsumed != nullptr) {
1735             // Does the audio sink need to be opened now?
1736             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1737         }
1738 
1739         queue->erase(queue->begin());
1740         entry = NULL;
1741     }
1742 }
1743 
1744 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1745     sp<AMessage> notify = mNotify->dup();
1746     notify->setInt32("what", kWhatFlushComplete);
1747     notify->setInt32("audio", static_cast<int32_t>(audio));
1748     notify->post();
1749 }
1750 
1751 bool NuPlayer::Renderer::dropBufferIfStale(
1752         bool audio, const sp<AMessage> &msg) {
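    // A buffer is stale when its queueGeneration no longer matches the renderer's current one
    // (e.g. after a flush); stale buffers are returned immediately via notifyConsumed.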
1753     int32_t queueGeneration;
1754     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1755 
1756     if (queueGeneration == getQueueGeneration(audio)) {
1757         return false;
1758     }
1759 
1760     sp<AMessage> notifyConsumed;
1761     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1762         notifyConsumed->post();
1763     }
1764 
1765     return true;
1766 }
1767 
1768 void NuPlayer::Renderer::onAudioSinkChanged() {
1769     if (offloadingAudio()) {
1770         return;
1771     }
1772     CHECK(!mDrainAudioQueuePending);
1773     mNumFramesWritten = 0;
1774     mAnchorNumFramesWritten = -1;
1775     uint32_t written;
1776     if (mAudioSink->getFramesWritten(&written) == OK) {
1777         mNumFramesWritten = written;
1778     }
1779 }
1780 
1781 void NuPlayer::Renderer::onDisableOffloadAudio() {
1782     Mutex::Autolock autoLock(mLock);
1783     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1784     ++mAudioDrainGeneration;
1785     if (mAudioRenderingStartGeneration != -1) {
1786         prepareForMediaRenderingStart_l();
1787         // PauseTimeout is applied to offload mode only. Cancel pending timer.
1788         cancelAudioOffloadPauseTimeout();
1789     }
1790 }
1791 
1792 void NuPlayer::Renderer::onEnableOffloadAudio() {
1793     Mutex::Autolock autoLock(mLock);
1794     mFlags |= FLAG_OFFLOAD_AUDIO;
1795     ++mAudioDrainGeneration;
1796     if (mAudioRenderingStartGeneration != -1) {
1797         prepareForMediaRenderingStart_l();
1798     }
1799 }
1800 
1801 void NuPlayer::Renderer::onPause() {
1802     if (mPaused) {
1803         return;
1804     }
1805 
1806     startAudioOffloadPauseTimeout();
1807 
1808     {
1809         Mutex::Autolock autoLock(mLock);
1810         // we do not increment the audio drain generation so that we keep filling the audio buffer during pause.
1811         ++mVideoDrainGeneration;
1812         prepareForMediaRenderingStart_l();
1813         mPaused = true;
1814         mMediaClock->setPlaybackRate(0.0);
1815     }
1816 
1817     mDrainAudioQueuePending = false;
1818     mDrainVideoQueuePending = false;
1819 
1820     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1821     mAudioSink->pause();
1822 
1823     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1824           mAudioQueue.size(), mVideoQueue.size());
1825 }
1826 
1827 void NuPlayer::Renderer::onResume() {
1828     if (!mPaused) {
1829         return;
1830     }
1831 
1832     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1833     cancelAudioOffloadPauseTimeout();
1834     if (mAudioSink->ready()) {
1835         status_t err = mAudioSink->start();
1836         if (err != OK) {
1837             ALOGE("cannot start AudioSink err %d", err);
1838             notifyAudioTearDown(kDueToError);
1839         }
1840     }
1841 
1842     {
1843         Mutex::Autolock autoLock(mLock);
1844         mPaused = false;
1845         // rendering started message may have been delayed if we were paused.
1846         if (mRenderingDataDelivered) {
1847             notifyIfMediaRenderingStarted_l();
1848         }
1849         // configure the AudioSink, as we did not do it when pausing
1850         if (mAudioSink != NULL && mAudioSink->ready()) {
1851             mAudioSink->setPlaybackRate(mPlaybackSettings);
1852         }
1853 
1854         mMediaClock->setPlaybackRate(mPlaybackRate);
1855 
1856         if (!mAudioQueue.empty()) {
1857             postDrainAudioQueue_l();
1858         }
1859     }
1860 
1861     if (!mVideoQueue.empty()) {
1862         postDrainVideoQueue();
1863     }
1864 }
1865 
1866 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1867     if (mVideoScheduler == NULL) {
1868         mVideoScheduler = new VideoFrameScheduler();
1869     }
1870     mVideoScheduler->init(fps);
1871 }
1872 
1873 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1874     Mutex::Autolock autoLock(mLock);
1875     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1876 }
1877 
1878 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1879     Mutex::Autolock autoLock(mLock);
1880     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1881 }
1882 
1883 bool NuPlayer::Renderer::getSyncQueues() {
1884     Mutex::Autolock autoLock(mLock);
1885     return mSyncQueues;
1886 }
1887 
1888 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1889     if (mAudioTornDown) {
1890         return;
1891     }
1892 
1893     // TimeoutWhenPaused is only for offload mode.
1894     if (reason == kDueToTimeout && !offloadingAudio()) {
1895         return;
1896     }
1897 
1898     mAudioTornDown = true;
1899 
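    // Attach the current position so the player can resume from the same point after the sink
    // is torn down and rebuilt.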
1900     int64_t currentPositionUs;
1901     sp<AMessage> notify = mNotify->dup();
1902     if (getCurrentPosition(&currentPositionUs) == OK) {
1903         notify->setInt64("positionUs", currentPositionUs);
1904     }
1905 
1906     mAudioSink->stop();
1907     mAudioSink->flush();
1908 
1909     notify->setInt32("what", kWhatAudioTearDown);
1910     notify->setInt32("reason", reason);
1911     notify->post();
1912 }
1913 
1914 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1915     if (offloadingAudio()) {
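        // Hold a wake lock and schedule a timeout so that a long pause eventually tears down
        // the offloaded audio path (see kOffloadPauseMaxUs).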
1916         mWakeLock->acquire();
1917         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1918         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1919         msg->post(kOffloadPauseMaxUs);
1920     }
1921 }
1922 
1923 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1924     // We may have called startAudioOffloadPauseTimeout() without
1925     // the AudioSink open and with offloadingAudio enabled.
1926     //
1927     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1928     // we always release the wakelock and increment the pause timeout generation.
1929     //
1930     // Note: The acquired wakelock prevents the device from suspending
1931     // immediately after offload pause (in case a resume happens shortly thereafter).
1932     mWakeLock->release(true);
1933     ++mAudioOffloadPauseTimeoutGeneration;
1934 }
1935 
1936 status_t NuPlayer::Renderer::onOpenAudioSink(
1937         const sp<AMessage> &format,
1938         bool offloadOnly,
1939         bool hasVideo,
1940         uint32_t flags,
1941         bool isStreaming) {
1942     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1943             offloadOnly, offloadingAudio());
1944     bool audioSinkChanged = false;
1945 
1946     int32_t numChannels;
1947     CHECK(format->findInt32("channel-count", &numChannels));
1948 
1949     // channel mask info as read from the audio format
1950     int32_t mediaFormatChannelMask;
1951     // channel mask to use for native playback
1952     audio_channel_mask_t channelMask;
1953     if (format->findInt32("channel-mask", &mediaFormatChannelMask)) {
1954         // KEY_CHANNEL_MASK follows the android.media.AudioFormat java mask
1955         channelMask = audio_channel_mask_from_media_format_mask(mediaFormatChannelMask);
1956     } else {
1957         // no mask found: the mask will be derived from the channel count
1958         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1959     }
1960 
1961     int32_t sampleRate;
1962     CHECK(format->findInt32("sample-rate", &sampleRate));
1963 
1964     // read pcm encoding from MediaCodec output format, if available
1965     int32_t pcmEncoding;
1966     audio_format_t audioFormat =
1967             format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1968                     audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1969 
1970     if (offloadingAudio()) {
1971         AString mime;
1972         CHECK(format->findString("mime", &mime));
1973         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1974 
1975         if (err != OK) {
1976             ALOGE("Couldn't map mime \"%s\" to a valid "
1977                     "audio_format", mime.c_str());
1978             onDisableOffloadAudio();
1979         } else {
1980             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1981                     mime.c_str(), audioFormat);
1982 
1983             int avgBitRate = 0;
1984             format->findInt32("bitrate", &avgBitRate);
1985 
1986             int32_t aacProfile = -1;
1987             if (audioFormat == AUDIO_FORMAT_AAC
1988                     && format->findInt32("aac-profile", &aacProfile)) {
1989                 // Redefine AAC format as per aac profile
1990                 mapAACProfileToAudioFormat(
1991                         audioFormat,
1992                         aacProfile);
1993             }
1994 
1995             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1996             offloadInfo.duration_us = -1;
1997             format->findInt64(
1998                     "durationUs", &offloadInfo.duration_us);
1999             offloadInfo.sample_rate = sampleRate;
2000             offloadInfo.channel_mask = channelMask;
2001             offloadInfo.format = audioFormat;
2002             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
2003             offloadInfo.bit_rate = avgBitRate;
2004             offloadInfo.has_video = hasVideo;
2005             offloadInfo.is_streaming = isStreaming;
2006 
2007             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
2008                 ALOGV("openAudioSink: no change in offload mode");
2009                 // no change from previous configuration, everything ok.
2010                 return OK;
2011             }
2012             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2013 
2014             ALOGV("openAudioSink: try to open AudioSink in offload mode");
2015             uint32_t offloadFlags = flags;
2016             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
2017             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
2018             audioSinkChanged = true;
2019             mAudioSink->close();
2020 
2021             err = mAudioSink->open(
2022                     sampleRate,
2023                     numChannels,
2024                     (audio_channel_mask_t)channelMask,
2025                     audioFormat,
2026                     0 /* bufferCount - unused */,
2027                     &NuPlayer::Renderer::AudioSinkCallback,
2028                     this,
2029                     (audio_output_flags_t)offloadFlags,
2030                     &offloadInfo);
2031 
2032             if (err == OK) {
2033                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
2034             }
2035 
2036             if (err == OK) {
2037                 // If the playback is offloaded to h/w, we pass
2038                 // the HAL some metadata information.
2039                 // We don't want to do this for PCM because it
2040                 // will be going through the AudioFlinger mixer
2041                 // before reaching the hardware.
2042                 // TODO
2043                 mCurrentOffloadInfo = offloadInfo;
2044                 if (!mPaused) { // for preview mode, don't start if paused
2045                     err = mAudioSink->start();
2046                 }
2047                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
2048             }
2049             if (err != OK) {
2050                 // Clean up, fall back to non offload mode.
2051                 mAudioSink->close();
2052                 onDisableOffloadAudio();
2053                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2054                 ALOGV("openAudioSink: offload failed");
2055                 if (offloadOnly) {
2056                     notifyAudioTearDown(kForceNonOffload);
2057                 }
2058             } else {
2059                 mUseAudioCallback = true;  // offload mode transfers data through callback
2060                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
2061             }
2062         }
2063     }
2064     if (!offloadOnly && !offloadingAudio()) {
2065         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
2066         uint32_t pcmFlags = flags;
2067         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
2068 
2069         const PcmInfo info = {
2070                 (audio_channel_mask_t)channelMask,
2071                 (audio_output_flags_t)pcmFlags,
2072                 audioFormat,
2073                 numChannels,
2074                 sampleRate
2075         };
2076         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2077             ALOGV("openAudioSink: no change in pcm mode");
2078             // no change from previous configuration, everything ok.
2079             return OK;
2080         }
2081 
2082         audioSinkChanged = true;
2083         mAudioSink->close();
2084         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2085         // Note: It is possible to set up the callback, but not use it to send audio data.
2086         // This requires a fix in AudioSink to explicitly specify the transfer mode.
2087         mUseAudioCallback = getUseAudioCallbackSetting();
2088         if (mUseAudioCallback) {
2089             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
2090         }
2091 
2092         // Compute the desired buffer size.
2093         // For callback mode, the amount of time before wakeup is about half the buffer size.
2094         const uint32_t frameCount =
2095                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
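        // For example, 48000 Hz with a 500 ms sink setting requests a 24000-frame buffer.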
2096 
2097         // Setting doNotReconnect means the AudioSink will signal back and let NuPlayer re-construct the
2098         // AudioSink. We don't want this when there's video because it will cause a video seek to
2099         // the previous I frame. But we do want this when there's only audio because it will give
2100         // NuPlayer a chance to switch from non-offload mode to offload mode.
2101         // So we only set doNotReconnect when there's no video.
2102         const bool doNotReconnect = !hasVideo;
2103 
2104         // We should always be able to set our playback settings if the sink is closed.
2105         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2106                 "onOpenAudioSink: can't set playback rate on closed sink");
2107         status_t err = mAudioSink->open(
2108                     sampleRate,
2109                     numChannels,
2110                     (audio_channel_mask_t)channelMask,
2111                     audioFormat,
2112                     0 /* bufferCount - unused */,
2113                     mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
2114                     mUseAudioCallback ? this : NULL,
2115                     (audio_output_flags_t)pcmFlags,
2116                     NULL,
2117                     doNotReconnect,
2118                     frameCount);
2119         if (err != OK) {
2120             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2121             mAudioSink->close();
2122             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2123             return err;
2124         }
2125         mCurrentPcmInfo = info;
2126         if (!mPaused) { // for preview mode, don't start if paused
2127             mAudioSink->start();
2128         }
2129     }
2130     if (audioSinkChanged) {
2131         onAudioSinkChanged();
2132     }
2133     mAudioTornDown = false;
2134     return OK;
2135 }
2136 
2137 void NuPlayer::Renderer::onCloseAudioSink() {
2138     mAudioSink->close();
2139     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2140     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2141 }
2142 
2143 void NuPlayer::Renderer::onChangeAudioFormat(
2144         const sp<AMessage> &meta, const sp<AMessage> &notify) {
2145     sp<AMessage> format;
2146     CHECK(meta->findMessage("format", &format));
2147 
2148     int32_t offloadOnly;
2149     CHECK(meta->findInt32("offload-only", &offloadOnly));
2150 
2151     int32_t hasVideo;
2152     CHECK(meta->findInt32("has-video", &hasVideo));
2153 
2154     uint32_t flags;
2155     CHECK(meta->findInt32("flags", (int32_t *)&flags));
2156 
2157     uint32_t isStreaming;
2158     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2159 
2160     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2161 
2162     if (err != OK) {
2163         notify->setInt32("err", err);
2164     }
2165     notify->post();
2166 }
2167 
2168 }  // namespace android
2169