1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "NuPlayerRenderer.h"
22 #include <algorithm>
23 #include <cutils/properties.h>
24 #include <media/stagefright/foundation/ADebug.h>
25 #include <media/stagefright/foundation/AMessage.h>
26 #include <media/stagefright/foundation/AUtils.h>
27 #include <media/stagefright/foundation/AWakeLock.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaErrors.h>
30 #include <media/stagefright/MetaData.h>
31 #include <media/stagefright/Utils.h>
32 #include <media/stagefright/VideoFrameScheduler.h>
33 #include <media/MediaCodecBuffer.h>
34 
35 #include <inttypes.h>
36 
37 namespace android {
38 
39 /*
40  * Example of common configuration settings in shell script form
41 
42    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43    adb shell setprop audio.offload.disable 1
44 
45    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46    adb shell setprop audio.offload.video 1
47 
48    #Use audio callbacks for PCM data
49    adb shell setprop media.stagefright.audio.cbk 1
50 
51    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52    adb shell setprop media.stagefright.audio.deep 1
53 
54    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55    adb shell setprop media.stagefright.audio.sink 1000
56 
57  * These configurations take effect for the next track played (not the current track).
58  */
59 
60 static inline bool getUseAudioCallbackSetting() {
61     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62 }
63 
64 static inline int32_t getAudioSinkPcmMsSetting() {
65     return property_get_int32(
66             "media.stagefright.audio.sink", 500 /* default_value */);
67 }
68 
69 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
70 // is closed to allow the audio DSP to power down.
71 static const int64_t kOffloadPauseMaxUs = 10000000ll;
72 
73 // Maximum allowed delay from AudioSink, 1.5 seconds.
74 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
75 
76 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77 
78 // static
79 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
80         AUDIO_CHANNEL_NONE,
81         AUDIO_OUTPUT_FLAG_NONE,
82         AUDIO_FORMAT_INVALID,
83         0, // mNumChannels
84         0 // mSampleRate
85 };
86 
87 // static
88 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89 
90 NuPlayer::Renderer::Renderer(
91         const sp<MediaPlayerBase::AudioSink> &sink,
92         const sp<AMessage> &notify,
93         uint32_t flags)
94     : mAudioSink(sink),
95       mUseVirtualAudioSink(false),
96       mNotify(notify),
97       mFlags(flags),
98       mNumFramesWritten(0),
99       mDrainAudioQueuePending(false),
100       mDrainVideoQueuePending(false),
101       mAudioQueueGeneration(0),
102       mVideoQueueGeneration(0),
103       mAudioDrainGeneration(0),
104       mVideoDrainGeneration(0),
105       mAudioEOSGeneration(0),
106       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
107       mAudioFirstAnchorTimeMediaUs(-1),
108       mAnchorTimeMediaUs(-1),
109       mAnchorNumFramesWritten(-1),
110       mVideoLateByUs(0ll),
111       mHasAudio(false),
112       mHasVideo(false),
113       mNotifyCompleteAudio(false),
114       mNotifyCompleteVideo(false),
115       mSyncQueues(false),
116       mPaused(false),
117       mPauseDrainAudioAllowedUs(0),
118       mVideoSampleReceived(false),
119       mVideoRenderingStarted(false),
120       mVideoRenderingStartGeneration(0),
121       mAudioRenderingStartGeneration(0),
122       mRenderingDataDelivered(false),
123       mNextAudioClockUpdateTimeUs(-1),
124       mLastAudioMediaTimeUs(-1),
125       mAudioOffloadPauseTimeoutGeneration(0),
126       mAudioTornDown(false),
127       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
128       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
129       mTotalBuffersQueued(0),
130       mLastAudioBufferDrained(0),
131       mUseAudioCallback(false),
132       mWakeLock(new AWakeLock()) {
133     mMediaClock = new MediaClock;
134     mPlaybackRate = mPlaybackSettings.mSpeed;
135     mMediaClock->setPlaybackRate(mPlaybackRate);
136 }
137 
138 NuPlayer::Renderer::~Renderer() {
139     if (offloadingAudio()) {
140         mAudioSink->stop();
141         mAudioSink->flush();
142         mAudioSink->close();
143     }
144 
145     // Try to avoid a race condition in case the callback is still active.
146     Mutex::Autolock autoLock(mLock);
147     if (mUseAudioCallback) {
148         flushQueue(&mAudioQueue);
149         flushQueue(&mVideoQueue);
150     }
151     mWakeLock.clear();
152     mMediaClock.clear();
153     mVideoScheduler.clear();
154     mNotify.clear();
155     mAudioSink.clear();
156 }
157 
158 void NuPlayer::Renderer::queueBuffer(
159         bool audio,
160         const sp<MediaCodecBuffer> &buffer,
161         const sp<AMessage> &notifyConsumed) {
162     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
163     msg->setInt32("queueGeneration", getQueueGeneration(audio));
164     msg->setInt32("audio", static_cast<int32_t>(audio));
165     msg->setObject("buffer", buffer);
166     msg->setMessage("notifyConsumed", notifyConsumed);
167     msg->post();
168 }
169 
170 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
171     CHECK_NE(finalResult, (status_t)OK);
172 
173     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
174     msg->setInt32("queueGeneration", getQueueGeneration(audio));
175     msg->setInt32("audio", static_cast<int32_t>(audio));
176     msg->setInt32("finalResult", finalResult);
177     msg->post();
178 }
179 
180 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
181     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
182     writeToAMessage(msg, rate);
183     sp<AMessage> response;
184     status_t err = msg->postAndAwaitResponse(&response);
185     if (err == OK && response != NULL) {
186         CHECK(response->findInt32("err", &err));
187     }
188     return err;
189 }
190 
191 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
192     if (rate.mSpeed == 0.f) {
193         onPause();
194         // don't call audiosink's setPlaybackRate when pausing, as the pitch does not
195         // have to correspond to any non-zero speed (e.g. the old speed). Keep the
196         // settings nonetheless, using the old speed, in case the audiosink changes.
197         AudioPlaybackRate newRate = rate;
198         newRate.mSpeed = mPlaybackSettings.mSpeed;
199         mPlaybackSettings = newRate;
200         return OK;
201     }
202 
203     if (mAudioSink != NULL && mAudioSink->ready()) {
204         status_t err = mAudioSink->setPlaybackRate(rate);
205         if (err != OK) {
206             return err;
207         }
208     }
209     mPlaybackSettings = rate;
210     mPlaybackRate = rate.mSpeed;
211     mMediaClock->setPlaybackRate(mPlaybackRate);
212     return OK;
213 }
214 
215 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
216     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
217     sp<AMessage> response;
218     status_t err = msg->postAndAwaitResponse(&response);
219     if (err == OK && response != NULL) {
220         CHECK(response->findInt32("err", &err));
221         if (err == OK) {
222             readFromAMessage(response, rate);
223         }
224     }
225     return err;
226 }
227 
228 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
229     if (mAudioSink != NULL && mAudioSink->ready()) {
230         status_t err = mAudioSink->getPlaybackRate(rate);
231         if (err == OK) {
232             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
233                 ALOGW("correcting mismatch in internal/external playback rate");
234             }
235             // get the playback settings used by the audiosink, as they may be
236             // slightly off because the audiosink does not take small changes.
237             mPlaybackSettings = *rate;
238             if (mPaused) {
239                 rate->mSpeed = 0.f;
240             }
241         }
242         return err;
243     }
244     *rate = mPlaybackSettings;
245     return OK;
246 }
247 
248 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
249     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
250     writeToAMessage(msg, sync, videoFpsHint);
251     sp<AMessage> response;
252     status_t err = msg->postAndAwaitResponse(&response);
253     if (err == OK && response != NULL) {
254         CHECK(response->findInt32("err", &err));
255     }
256     return err;
257 }
258 
259 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
260     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
261         return BAD_VALUE;
262     }
263     // TODO: support sync sources
264     return INVALID_OPERATION;
265 }
266 
267 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
268     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
269     sp<AMessage> response;
270     status_t err = msg->postAndAwaitResponse(&response);
271     if (err == OK && response != NULL) {
272         CHECK(response->findInt32("err", &err));
273         if (err == OK) {
274             readFromAMessage(response, sync, videoFps);
275         }
276     }
277     return err;
278 }
279 
280 status_t NuPlayer::Renderer::onGetSyncSettings(
281         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
282     *sync = mSyncSettings;
283     *videoFps = -1.f;
284     return OK;
285 }
286 
287 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
288     {
289         Mutex::Autolock autoLock(mLock);
290         if (audio) {
291             mNotifyCompleteAudio |= notifyComplete;
292             clearAudioFirstAnchorTime_l();
293             ++mAudioQueueGeneration;
294             ++mAudioDrainGeneration;
295         } else {
296             mNotifyCompleteVideo |= notifyComplete;
297             ++mVideoQueueGeneration;
298             ++mVideoDrainGeneration;
299         }
300 
301         mMediaClock->clearAnchor();
302         mVideoLateByUs = 0;
303         mSyncQueues = false;
304     }
305 
306     sp<AMessage> msg = new AMessage(kWhatFlush, this);
307     msg->setInt32("audio", static_cast<int32_t>(audio));
308     msg->post();
309 }
310 
311 void NuPlayer::Renderer::signalTimeDiscontinuity() {
312 }
313 
314 void NuPlayer::Renderer::signalDisableOffloadAudio() {
315     (new AMessage(kWhatDisableOffloadAudio, this))->post();
316 }
317 
318 void NuPlayer::Renderer::signalEnableOffloadAudio() {
319     (new AMessage(kWhatEnableOffloadAudio, this))->post();
320 }
321 
322 void NuPlayer::Renderer::pause() {
323     (new AMessage(kWhatPause, this))->post();
324 }
325 
326 void NuPlayer::Renderer::resume() {
327     (new AMessage(kWhatResume, this))->post();
328 }
329 
330 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
331     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
332     msg->setFloat("frame-rate", fps);
333     msg->post();
334 }
335 
336 // Called on any thread, without mLock acquired.
337 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
338     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
339     if (result == OK) {
340         return result;
341     }
342 
343     // MediaClock has not started yet. Try to start it if possible.
344     {
345         Mutex::Autolock autoLock(mLock);
346         if (mAudioFirstAnchorTimeMediaUs == -1) {
347             return result;
348         }
349 
350         AudioTimestamp ts;
351         status_t res = mAudioSink->getTimestamp(ts);
352         if (res != OK) {
353             return result;
354         }
355 
356         // AudioSink has rendered some frames.
357         int64_t nowUs = ALooper::GetNowUs();
358         int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
359                 + mAudioFirstAnchorTimeMediaUs;
360         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
361     }
362 
363     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
364 }
365 
366 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
367     mAudioFirstAnchorTimeMediaUs = -1;
368     mMediaClock->setStartingTimeMedia(-1);
369 }
370 
371 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
372     if (mAudioFirstAnchorTimeMediaUs == -1) {
373         mAudioFirstAnchorTimeMediaUs = mediaUs;
374         mMediaClock->setStartingTimeMedia(mediaUs);
375     }
376 }
377 
378 // Called on renderer looper.
379 void NuPlayer::Renderer::clearAnchorTime() {
380     mMediaClock->clearAnchor();
381     mAnchorTimeMediaUs = -1;
382     mAnchorNumFramesWritten = -1;
383 }
384 
385 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
386     Mutex::Autolock autoLock(mLock);
387     mVideoLateByUs = lateUs;
388 }
389 
390 int64_t NuPlayer::Renderer::getVideoLateByUs() {
391     Mutex::Autolock autoLock(mLock);
392     return mVideoLateByUs;
393 }
394 
395 status_t NuPlayer::Renderer::openAudioSink(
396         const sp<AMessage> &format,
397         bool offloadOnly,
398         bool hasVideo,
399         uint32_t flags,
400         bool *isOffloaded,
401         bool isStreaming) {
402     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
403     msg->setMessage("format", format);
404     msg->setInt32("offload-only", offloadOnly);
405     msg->setInt32("has-video", hasVideo);
406     msg->setInt32("flags", flags);
407     msg->setInt32("isStreaming", isStreaming);
408 
409     sp<AMessage> response;
410     status_t postStatus = msg->postAndAwaitResponse(&response);
411 
412     int32_t err;
413     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
414         err = INVALID_OPERATION;
415     } else if (err == OK && isOffloaded != NULL) {
416         int32_t offload;
417         CHECK(response->findInt32("offload", &offload));
418         *isOffloaded = (offload != 0);
419     }
420     return err;
421 }
422 
423 void NuPlayer::Renderer::closeAudioSink() {
424     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
425 
426     sp<AMessage> response;
427     msg->postAndAwaitResponse(&response);
428 }
429 
430 void NuPlayer::Renderer::changeAudioFormat(
431         const sp<AMessage> &format,
432         bool offloadOnly,
433         bool hasVideo,
434         uint32_t flags,
435         bool isStreaming,
436         const sp<AMessage> &notify) {
437     sp<AMessage> meta = new AMessage;
438     meta->setMessage("format", format);
439     meta->setInt32("offload-only", offloadOnly);
440     meta->setInt32("has-video", hasVideo);
441     meta->setInt32("flags", flags);
442     meta->setInt32("isStreaming", isStreaming);
443 
444     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
445     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
446     msg->setMessage("notify", notify);
447     msg->setMessage("meta", meta);
448     msg->post();
449 }
450 
451 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
452     switch (msg->what()) {
453         case kWhatOpenAudioSink:
454         {
455             sp<AMessage> format;
456             CHECK(msg->findMessage("format", &format));
457 
458             int32_t offloadOnly;
459             CHECK(msg->findInt32("offload-only", &offloadOnly));
460 
461             int32_t hasVideo;
462             CHECK(msg->findInt32("has-video", &hasVideo));
463 
464             uint32_t flags;
465             CHECK(msg->findInt32("flags", (int32_t *)&flags));
466 
467             uint32_t isStreaming;
468             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
469 
470             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
471 
472             sp<AMessage> response = new AMessage;
473             response->setInt32("err", err);
474             response->setInt32("offload", offloadingAudio());
475 
476             sp<AReplyToken> replyID;
477             CHECK(msg->senderAwaitsResponse(&replyID));
478             response->postReply(replyID);
479 
480             break;
481         }
482 
483         case kWhatCloseAudioSink:
484         {
485             sp<AReplyToken> replyID;
486             CHECK(msg->senderAwaitsResponse(&replyID));
487 
488             onCloseAudioSink();
489 
490             sp<AMessage> response = new AMessage;
491             response->postReply(replyID);
492             break;
493         }
494 
495         case kWhatStopAudioSink:
496         {
497             mAudioSink->stop();
498             break;
499         }
500 
501         case kWhatChangeAudioFormat:
502         {
503             int32_t queueGeneration;
504             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
505 
506             sp<AMessage> notify;
507             CHECK(msg->findMessage("notify", &notify));
508 
509             if (offloadingAudio()) {
510                 ALOGW("changeAudioFormat should NOT be called in offload mode");
511                 notify->setInt32("err", INVALID_OPERATION);
512                 notify->post();
513                 break;
514             }
515 
516             sp<AMessage> meta;
517             CHECK(msg->findMessage("meta", &meta));
518 
519             if (queueGeneration != getQueueGeneration(true /* audio */)
520                     || mAudioQueue.empty()) {
521                 onChangeAudioFormat(meta, notify);
522                 break;
523             }
524 
525             QueueEntry entry;
526             entry.mNotifyConsumed = notify;
527             entry.mMeta = meta;
528 
529             Mutex::Autolock autoLock(mLock);
530             mAudioQueue.push_back(entry);
531             postDrainAudioQueue_l();
532 
533             break;
534         }
535 
536         case kWhatDrainAudioQueue:
537         {
538             mDrainAudioQueuePending = false;
539 
540             int32_t generation;
541             CHECK(msg->findInt32("drainGeneration", &generation));
542             if (generation != getDrainGeneration(true /* audio */)) {
543                 break;
544             }
545 
546             if (onDrainAudioQueue()) {
547                 uint32_t numFramesPlayed;
548                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
549                          (status_t)OK);
550 
551                 uint32_t numFramesPendingPlayout =
552                     mNumFramesWritten - numFramesPlayed;
553 
554                 // This is how long the audio sink will have data to
555                 // play back.
556                 int64_t delayUs =
557                     mAudioSink->msecsPerFrame()
558                         * numFramesPendingPlayout * 1000ll;
559                 if (mPlaybackRate > 1.0f) {
560                     delayUs /= mPlaybackRate;
561                 }
562 
563                 // Let's give it more data after about half that time
564                 // has elapsed.
565                 delayUs /= 2;
566                 // check the buffer size to estimate maximum delay permitted.
567                 const int64_t maxDrainDelayUs = std::max(
568                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
569                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
570                         (long long)delayUs, (long long)maxDrainDelayUs);
571                 Mutex::Autolock autoLock(mLock);
572                 postDrainAudioQueue_l(delayUs);
573             }
574             break;
575         }
576 
577         case kWhatDrainVideoQueue:
578         {
579             int32_t generation;
580             CHECK(msg->findInt32("drainGeneration", &generation));
581             if (generation != getDrainGeneration(false /* audio */)) {
582                 break;
583             }
584 
585             mDrainVideoQueuePending = false;
586 
587             onDrainVideoQueue();
588 
589             postDrainVideoQueue();
590             break;
591         }
592 
593         case kWhatPostDrainVideoQueue:
594         {
595             int32_t generation;
596             CHECK(msg->findInt32("drainGeneration", &generation));
597             if (generation != getDrainGeneration(false /* audio */)) {
598                 break;
599             }
600 
601             mDrainVideoQueuePending = false;
602             postDrainVideoQueue();
603             break;
604         }
605 
606         case kWhatQueueBuffer:
607         {
608             onQueueBuffer(msg);
609             break;
610         }
611 
612         case kWhatQueueEOS:
613         {
614             onQueueEOS(msg);
615             break;
616         }
617 
618         case kWhatEOS:
619         {
620             int32_t generation;
621             CHECK(msg->findInt32("audioEOSGeneration", &generation));
622             if (generation != mAudioEOSGeneration) {
623                 break;
624             }
625             status_t finalResult;
626             CHECK(msg->findInt32("finalResult", &finalResult));
627             notifyEOS(true /* audio */, finalResult);
628             break;
629         }
630 
631         case kWhatConfigPlayback:
632         {
633             sp<AReplyToken> replyID;
634             CHECK(msg->senderAwaitsResponse(&replyID));
635             AudioPlaybackRate rate;
636             readFromAMessage(msg, &rate);
637             status_t err = onConfigPlayback(rate);
638             sp<AMessage> response = new AMessage;
639             response->setInt32("err", err);
640             response->postReply(replyID);
641             break;
642         }
643 
644         case kWhatGetPlaybackSettings:
645         {
646             sp<AReplyToken> replyID;
647             CHECK(msg->senderAwaitsResponse(&replyID));
648             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
649             status_t err = onGetPlaybackSettings(&rate);
650             sp<AMessage> response = new AMessage;
651             if (err == OK) {
652                 writeToAMessage(response, rate);
653             }
654             response->setInt32("err", err);
655             response->postReply(replyID);
656             break;
657         }
658 
659         case kWhatConfigSync:
660         {
661             sp<AReplyToken> replyID;
662             CHECK(msg->senderAwaitsResponse(&replyID));
663             AVSyncSettings sync;
664             float videoFpsHint;
665             readFromAMessage(msg, &sync, &videoFpsHint);
666             status_t err = onConfigSync(sync, videoFpsHint);
667             sp<AMessage> response = new AMessage;
668             response->setInt32("err", err);
669             response->postReply(replyID);
670             break;
671         }
672 
673         case kWhatGetSyncSettings:
674         {
675             sp<AReplyToken> replyID;
676             CHECK(msg->senderAwaitsResponse(&replyID));
677 
678             ALOGV("kWhatGetSyncSettings");
679             AVSyncSettings sync;
680             float videoFps = -1.f;
681             status_t err = onGetSyncSettings(&sync, &videoFps);
682             sp<AMessage> response = new AMessage;
683             if (err == OK) {
684                 writeToAMessage(response, sync, videoFps);
685             }
686             response->setInt32("err", err);
687             response->postReply(replyID);
688             break;
689         }
690 
691         case kWhatFlush:
692         {
693             onFlush(msg);
694             break;
695         }
696 
697         case kWhatDisableOffloadAudio:
698         {
699             onDisableOffloadAudio();
700             break;
701         }
702 
703         case kWhatEnableOffloadAudio:
704         {
705             onEnableOffloadAudio();
706             break;
707         }
708 
709         case kWhatPause:
710         {
711             onPause();
712             break;
713         }
714 
715         case kWhatResume:
716         {
717             onResume();
718             break;
719         }
720 
721         case kWhatSetVideoFrameRate:
722         {
723             float fps;
724             CHECK(msg->findFloat("frame-rate", &fps));
725             onSetVideoFrameRate(fps);
726             break;
727         }
728 
729         case kWhatAudioTearDown:
730         {
731             int32_t reason;
732             CHECK(msg->findInt32("reason", &reason));
733 
734             onAudioTearDown((AudioTearDownReason)reason);
735             break;
736         }
737 
738         case kWhatAudioOffloadPauseTimeout:
739         {
740             int32_t generation;
741             CHECK(msg->findInt32("drainGeneration", &generation));
742             if (generation != mAudioOffloadPauseTimeoutGeneration) {
743                 break;
744             }
745             ALOGV("Audio Offload tear down due to pause timeout.");
746             onAudioTearDown(kDueToTimeout);
747             mWakeLock->release();
748             break;
749         }
750 
751         default:
752             TRESPASS();
753             break;
754     }
755 }
756 
757 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
758     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
759         return;
760     }
761 
762     if (mAudioQueue.empty()) {
763         return;
764     }
765 
766     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
767     if (mPaused) {
768         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
769         if (diffUs > delayUs) {
770             delayUs = diffUs;
771         }
772     }
773 
774     mDrainAudioQueuePending = true;
775     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
776     msg->setInt32("drainGeneration", mAudioDrainGeneration);
777     msg->post(delayUs);
778 }
779 
780 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
781     mAudioRenderingStartGeneration = mAudioDrainGeneration;
782     mVideoRenderingStartGeneration = mVideoDrainGeneration;
783     mRenderingDataDelivered = false;
784 }
785 
786 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
787     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
788         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
789         mRenderingDataDelivered = true;
790         if (mPaused) {
791             return;
792         }
793         mVideoRenderingStartGeneration = -1;
794         mAudioRenderingStartGeneration = -1;
795 
796         sp<AMessage> notify = mNotify->dup();
797         notify->setInt32("what", kWhatMediaRenderingStart);
798         notify->post();
799     }
800 }
801 
802 // static
803 size_t NuPlayer::Renderer::AudioSinkCallback(
804         MediaPlayerBase::AudioSink * /* audioSink */,
805         void *buffer,
806         size_t size,
807         void *cookie,
808         MediaPlayerBase::AudioSink::cb_event_t event) {
809     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
810 
811     switch (event) {
812         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
813         {
814             return me->fillAudioBuffer(buffer, size);
815             break;
816         }
817 
818         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
819         {
820             ALOGV("AudioSink::CB_EVENT_STREAM_END");
821             me->notifyEOSCallback();
822             break;
823         }
824 
825         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
826         {
827             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
828             me->notifyAudioTearDown(kDueToError);
829             break;
830         }
831     }
832 
833     return 0;
834 }
835 
836 void NuPlayer::Renderer::notifyEOSCallback() {
837     Mutex::Autolock autoLock(mLock);
838 
839     if (!mUseAudioCallback) {
840         return;
841     }
842 
843     notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
844 }
845 
846 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
847     Mutex::Autolock autoLock(mLock);
848 
849     if (!mUseAudioCallback) {
850         return 0;
851     }
852 
853     bool hasEOS = false;
854 
855     size_t sizeCopied = 0;
856     bool firstEntry = true;
857     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
858     while (sizeCopied < size && !mAudioQueue.empty()) {
859         entry = &*mAudioQueue.begin();
860 
861         if (entry->mBuffer == NULL) { // EOS
862             hasEOS = true;
863             mAudioQueue.erase(mAudioQueue.begin());
864             break;
865         }
866 
867         if (firstEntry && entry->mOffset == 0) {
868             firstEntry = false;
869             int64_t mediaTimeUs;
870             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
871             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
872             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
873         }
874 
875         size_t copy = entry->mBuffer->size() - entry->mOffset;
876         size_t sizeRemaining = size - sizeCopied;
877         if (copy > sizeRemaining) {
878             copy = sizeRemaining;
879         }
880 
881         memcpy((char *)buffer + sizeCopied,
882                entry->mBuffer->data() + entry->mOffset,
883                copy);
884 
885         entry->mOffset += copy;
886         if (entry->mOffset == entry->mBuffer->size()) {
887             entry->mNotifyConsumed->post();
888             mAudioQueue.erase(mAudioQueue.begin());
889             entry = NULL;
890         }
891         sizeCopied += copy;
892 
893         notifyIfMediaRenderingStarted_l();
894     }
895 
896     if (mAudioFirstAnchorTimeMediaUs >= 0) {
897         int64_t nowUs = ALooper::GetNowUs();
898         int64_t nowMediaUs =
899             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
900         // we don't know how much data we are queueing for offloaded tracks.
901         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
902     }
903 
904     // for non-offloaded audio, we need to compute the frames written because
905     // there is no EVENT_STREAM_END notification. The frames written gives
906     // an estimate on the pending played out duration.
907     if (!offloadingAudio()) {
908         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
909     }
910 
911     if (hasEOS) {
912         (new AMessage(kWhatStopAudioSink, this))->post();
913         // As there is currently no EVENT_STREAM_END callback notification for
914         // non-offloaded audio tracks, we need to post the EOS ourselves.
915         if (!offloadingAudio()) {
916             int64_t postEOSDelayUs = 0;
917             if (mAudioSink->needsTrailingPadding()) {
918                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
919             }
920             ALOGV("fillAudioBuffer: notifyEOS_l "
921                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
922                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
923             notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
924         }
925     }
926     return sizeCopied;
927 }
928 
929 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
930     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
931     bool foundEOS = false;
932     while (it != mAudioQueue.end()) {
933         int32_t eos;
934         QueueEntry *entry = &*it++;
935         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
936                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
937             itEOS = it;
938             foundEOS = true;
939         }
940     }
941 
942     if (foundEOS) {
943         // post all replies before EOS and drop the samples
944         for (it = mAudioQueue.begin(); it != itEOS; it++) {
945             if (it->mBuffer == nullptr) {
946                 if (it->mNotifyConsumed == nullptr) {
947                     // delay doesn't matter as we don't even have an AudioTrack
948                     notifyEOS(true /* audio */, it->mFinalResult);
949                 } else {
950                     // TAG for re-opening audio sink.
951                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
952                 }
953             } else {
954                 it->mNotifyConsumed->post();
955             }
956         }
957         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
958     }
959 }
960 
961 bool NuPlayer::Renderer::onDrainAudioQueue() {
962     // do not drain audio during teardown as queued buffers may be invalid.
963     if (mAudioTornDown) {
964         return false;
965     }
966     // TODO: This call to getPosition checks if AudioTrack has been created
967     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
968     // CHECKs on getPosition will fail.
969     // We still need to figure out why AudioTrack is not created when
970     // this function is called. One possible reason could be leftover
971     // audio. Another possible place is to check whether decoder
972     // has received INFO_FORMAT_CHANGED as the first buffer since
973     // AudioSink is opened there, and possible interactions with flush
974     // immediately after start. Investigate error message
975     // "vorbis_dsp_synthesis returned -135", along with RTSP.
976     uint32_t numFramesPlayed;
977     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
978         // When getPosition fails, renderer will not reschedule the draining
979         // unless new samples are queued.
980         // If we have pending EOS (or "eos" marker for discontinuities), we need
981         // to post these now as NuPlayerDecoder might be waiting for it.
982         drainAudioQueueUntilLastEOS();
983 
984         ALOGW("onDrainAudioQueue(): audio sink is not ready");
985         return false;
986     }
987 
988 #if 0
989     ssize_t numFramesAvailableToWrite =
990         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
991 
992     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
993         ALOGI("audio sink underrun");
994     } else {
995         ALOGV("audio queue has %d frames left to play",
996              mAudioSink->frameCount() - numFramesAvailableToWrite);
997     }
998 #endif
999 
1000     uint32_t prevFramesWritten = mNumFramesWritten;
1001     while (!mAudioQueue.empty()) {
1002         QueueEntry *entry = &*mAudioQueue.begin();
1003 
1004         if (entry->mBuffer == NULL) {
1005             if (entry->mNotifyConsumed != nullptr) {
1006                 // TAG for re-open audio sink.
1007                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1008                 mAudioQueue.erase(mAudioQueue.begin());
1009                 continue;
1010             }
1011 
1012             // EOS
1013             if (mPaused) {
1014                 // Do not notify EOS when paused.
1015             // This is needed to avoid switching to the next clip while paused.
1016                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1017                 return false;
1018             }
1019 
1020             int64_t postEOSDelayUs = 0;
1021             if (mAudioSink->needsTrailingPadding()) {
1022                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1023             }
1024             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1025             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1026 
1027             mAudioQueue.erase(mAudioQueue.begin());
1028             entry = NULL;
1029             if (mAudioSink->needsTrailingPadding()) {
1030                 // If we're not in gapless playback (i.e. through setNextPlayer), we
1031                 // need to stop the track here, because that will play out the last
1032                 // little bit at the end of the file. Otherwise short files won't play.
1033                 mAudioSink->stop();
1034                 mNumFramesWritten = 0;
1035             }
1036             return false;
1037         }
1038 
1039         mLastAudioBufferDrained = entry->mBufferOrdinal;
1040 
1041         // ignore 0-sized buffer which could be EOS marker with no data
1042         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1043             int64_t mediaTimeUs;
1044             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1045             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1046                     mediaTimeUs / 1E6);
1047             onNewAudioMediaTime(mediaTimeUs);
1048         }
1049 
1050         size_t copy = entry->mBuffer->size() - entry->mOffset;
1051 
1052         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1053                                             copy, false /* blocking */);
1054         if (written < 0) {
1055             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1056             if (written == WOULD_BLOCK) {
1057                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1058             } else {
1059                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1060                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1061                 // true, in which case the NuPlayer will handle the reconnect.
1062                 notifyAudioTearDown(kDueToError);
1063             }
1064             break;
1065         }
1066 
1067         entry->mOffset += written;
1068         size_t remainder = entry->mBuffer->size() - entry->mOffset;
1069         if ((ssize_t)remainder < mAudioSink->frameSize()) {
1070             if (remainder > 0) {
1071                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1072                         remainder);
1073                 entry->mOffset += remainder;
1074                 copy -= remainder;
1075             }
1076 
1077             entry->mNotifyConsumed->post();
1078             mAudioQueue.erase(mAudioQueue.begin());
1079 
1080             entry = NULL;
1081         }
1082 
1083         size_t copiedFrames = written / mAudioSink->frameSize();
1084         mNumFramesWritten += copiedFrames;
1085 
1086         {
1087             Mutex::Autolock autoLock(mLock);
1088             int64_t maxTimeMedia;
1089             maxTimeMedia =
1090                 mAnchorTimeMediaUs +
1091                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1092                                 * 1000LL * mAudioSink->msecsPerFrame());
1093             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1094 
1095             notifyIfMediaRenderingStarted_l();
1096         }
1097 
1098         if (written != (ssize_t)copy) {
1099             // A short count was received from AudioSink::write()
1100             //
1101             // AudioSink write is called in non-blocking mode.
1102             // It may return with a short count when:
1103             //
1104             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1105             //    discarded.
1106             // 2) The data to be copied exceeds the available buffer in AudioSink.
1107             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1108             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1109 
1110             // (Case 1)
1111             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1112             // needs to fail, as we should not carry over fractional frames between calls.
1113             CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1114 
1115             // (Case 2, 3, 4)
1116             // Return early to the caller.
1117             // Beware of calling immediately again as this may busy-loop if you are not careful.
1118             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1119             break;
1120         }
1121     }
1122 
1123     // calculate whether we need to reschedule another write.
1124     bool reschedule = !mAudioQueue.empty()
1125             && (!mPaused
1126                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1127     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1128     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1129     return reschedule;
1130 }
1131 
1132 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1133     int32_t sampleRate = offloadingAudio() ?
1134             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1135     if (sampleRate == 0) {
1136         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1137         return 0;
1138     }
1139     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1140     return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1141 }
1142 
1143 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1144 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1145     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1146     if (mUseVirtualAudioSink) {
1147         int64_t nowUs = ALooper::GetNowUs();
1148         int64_t mediaUs;
1149         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1150             return 0ll;
1151         } else {
1152             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1153         }
1154     }
1155 
1156     const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1157     int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1158     if (pendingUs < 0) {
1159         // This shouldn't happen unless the timestamp is stale.
1160         ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1161                 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1162                 __func__, (long long)pendingUs,
1163                 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1164         pendingUs = 0;
1165     }
1166     return pendingUs;
1167 }
1168 
1169 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1170     int64_t realUs;
1171     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1172         // If we failed to get the current position, e.g. because the audio clock is
1173         // not ready, then just play out the video immediately without delay.
1174         return nowUs;
1175     }
1176     return realUs;
1177 }
1178 
1179 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1180     Mutex::Autolock autoLock(mLock);
1181     // TRICKY: vorbis decoder generates multiple frames with the same
1182     // timestamp, so only update on the first frame with a given timestamp
1183     if (mediaTimeUs == mAnchorTimeMediaUs) {
1184         return;
1185     }
1186     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1187 
1188     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1189     if (mNextAudioClockUpdateTimeUs == -1) {
1190         AudioTimestamp ts;
1191         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1192             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1193         }
1194     }
1195     int64_t nowUs = ALooper::GetNowUs();
1196     if (mNextAudioClockUpdateTimeUs >= 0) {
1197         if (nowUs >= mNextAudioClockUpdateTimeUs) {
1198             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1199             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1200             mUseVirtualAudioSink = false;
1201             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1202         }
1203     } else {
1204         int64_t unused;
1205         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1206                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1207                         > kMaxAllowedAudioSinkDelayUs)) {
1208             // Enough data has been sent to AudioSink, but AudioSink has not rendered
1209             // any data yet. Something is wrong with AudioSink, e.g., the device is not
1210             // connected to audio out.
1211             // Switch to system clock. This essentially creates a virtual AudioSink with
1212             // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1213             // This virtual AudioSink renders audio data starting from the very first sample
1214             // and it's paced by system clock.
1215             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1216             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1217             mUseVirtualAudioSink = true;
1218         }
1219     }
1220     mAnchorNumFramesWritten = mNumFramesWritten;
1221     mAnchorTimeMediaUs = mediaTimeUs;
1222 }
1223 
1224 // Called without mLock acquired.
1225 void NuPlayer::Renderer::postDrainVideoQueue() {
1226     if (mDrainVideoQueuePending
1227             || getSyncQueues()
1228             || (mPaused && mVideoSampleReceived)) {
1229         return;
1230     }
1231 
1232     if (mVideoQueue.empty()) {
1233         return;
1234     }
1235 
1236     QueueEntry &entry = *mVideoQueue.begin();
1237 
1238     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1239     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1240 
1241     if (entry.mBuffer == NULL) {
1242         // EOS doesn't carry a timestamp.
1243         msg->post();
1244         mDrainVideoQueuePending = true;
1245         return;
1246     }
1247 
1248     bool needRepostDrainVideoQueue = false;
1249     int64_t delayUs;
1250     int64_t nowUs = ALooper::GetNowUs();
1251     int64_t realTimeUs;
1252     if (mFlags & FLAG_REAL_TIME) {
1253         int64_t mediaTimeUs;
1254         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1255         realTimeUs = mediaTimeUs;
1256     } else {
1257         int64_t mediaTimeUs;
1258         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1259 
1260         {
1261             Mutex::Autolock autoLock(mLock);
1262             if (mAnchorTimeMediaUs < 0) {
1263                 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1264                 mAnchorTimeMediaUs = mediaTimeUs;
1265                 realTimeUs = nowUs;
1266             } else if (!mVideoSampleReceived) {
1267                 // Always render the first video frame.
1268                 realTimeUs = nowUs;
1269             } else if (mAudioFirstAnchorTimeMediaUs < 0
1270                 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
1271                 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1272             } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
1273                 needRepostDrainVideoQueue = true;
1274                 realTimeUs = nowUs;
1275             } else {
1276                 realTimeUs = nowUs;
1277             }
1278         }
1279         if (!mHasAudio) {
1280             // smooth out videos >= 10fps
1281             mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1282         }
1283 
1284         // Heuristics to handle the situation where the media time changed without
1285         // a discontinuity. If we have not drained an audio buffer that was
1286         // received after this buffer, repost in 10 msec. Otherwise repost
1287         // in 500 msec.
1288         delayUs = realTimeUs - nowUs;
1289         int64_t postDelayUs = -1;
1290         if (delayUs > 500000) {
1291             postDelayUs = 500000;
1292             if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1293                 postDelayUs = 10000;
1294             }
1295         } else if (needRepostDrainVideoQueue) {
1296             // CHECK(mPlaybackRate > 0);
1297             // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
1298             // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
1299             postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
1300             postDelayUs /= mPlaybackRate;
1301         }
1302 
1303         if (postDelayUs >= 0) {
1304             msg->setWhat(kWhatPostDrainVideoQueue);
1305             msg->post(postDelayUs);
1306             mVideoScheduler->restart();
1307             ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
1308                     " retrying in %dms",
1309                     (int)(delayUs / 1000), (long long)mediaTimeUs,
1310                     (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
1311             mDrainVideoQueuePending = true;
1312             return;
1313         }
1314     }
1315 
1316     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1317     int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1318 
1319     delayUs = realTimeUs - nowUs;
1320 
1321     ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1322     // post 2 display refreshes before rendering is due
1323     msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1324 
1325     mDrainVideoQueuePending = true;
1326 }
1327 
1328 void NuPlayer::Renderer::onDrainVideoQueue() {
1329     if (mVideoQueue.empty()) {
1330         return;
1331     }
1332 
1333     QueueEntry *entry = &*mVideoQueue.begin();
1334 
1335     if (entry->mBuffer == NULL) {
1336         // EOS
1337 
1338         notifyEOS(false /* audio */, entry->mFinalResult);
1339 
1340         mVideoQueue.erase(mVideoQueue.begin());
1341         entry = NULL;
1342 
1343         setVideoLateByUs(0);
1344         return;
1345     }
1346 
1347     int64_t nowUs = ALooper::GetNowUs();
1348     int64_t realTimeUs;
1349     int64_t mediaTimeUs = -1;
1350     if (mFlags & FLAG_REAL_TIME) {
1351         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1352     } else {
1353         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1354 
1355         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1356     }
1357 
1358     bool tooLate = false;
1359 
1360     if (!mPaused) {
1361         setVideoLateByUs(nowUs - realTimeUs);
1362         tooLate = (mVideoLateByUs > 40000);
1363 
1364         if (tooLate) {
1365             ALOGV("video late by %lld us (%.2f secs)",
1366                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1367         } else {
1368             int64_t mediaUs = 0;
1369             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1370             ALOGV("rendering video at media time %.2f secs",
1371                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1372                     mediaUs) / 1E6);
1373 
1374             if (!(mFlags & FLAG_REAL_TIME)
1375                     && mLastAudioMediaTimeUs != -1
1376                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1377                 // If audio ends before video, video continues to drive media clock.
1378                 // Also smooth out videos >= 10fps.
1379                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1380             }
1381         }
1382     } else {
1383         setVideoLateByUs(0);
1384         if (!mVideoSampleReceived && !mHasAudio) {
1385             // This will ensure that the first frame after a flush won't be used as anchor
1386             // when renderer is in paused state, because resume can happen any time after seek.
1387             clearAnchorTime();
1388         }
1389     }
1390 
1391     // Always render the first video frame while keeping stats on A/V sync.
1392     if (!mVideoSampleReceived) {
1393         realTimeUs = nowUs;
1394         tooLate = false;
1395     }
1396 
1397     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1398     entry->mNotifyConsumed->setInt32("render", !tooLate);
1399     entry->mNotifyConsumed->post();
1400     mVideoQueue.erase(mVideoQueue.begin());
1401     entry = NULL;
1402 
1403     mVideoSampleReceived = true;
1404 
1405     if (!mPaused) {
1406         if (!mVideoRenderingStarted) {
1407             mVideoRenderingStarted = true;
1408             notifyVideoRenderingStart();
1409         }
1410         Mutex::Autolock autoLock(mLock);
1411         notifyIfMediaRenderingStarted_l();
1412     }
1413 }
1414 
1415 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1416     sp<AMessage> notify = mNotify->dup();
1417     notify->setInt32("what", kWhatVideoRenderingStart);
1418     notify->post();
1419 }
1420 
1421 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1422     Mutex::Autolock autoLock(mLock);
1423     notifyEOS_l(audio, finalResult, delayUs);
1424 }
1425 
1426 void NuPlayer::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
1427     if (audio && delayUs > 0) {
1428         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1429         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1430         msg->setInt32("finalResult", finalResult);
1431         msg->post(delayUs);
1432         return;
1433     }
1434     sp<AMessage> notify = mNotify->dup();
1435     notify->setInt32("what", kWhatEOS);
1436     notify->setInt32("audio", static_cast<int32_t>(audio));
1437     notify->setInt32("finalResult", finalResult);
1438     notify->post(delayUs);
1439 
1440     if (audio) {
1441         // Video might outlive audio. Clear anchor to enable video only case.
1442         mAnchorTimeMediaUs = -1;
1443     }
1444 }
1445 
notifyAudioTearDown(AudioTearDownReason reason)1446 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1447     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1448     msg->setInt32("reason", reason);
1449     msg->post();
1450 }
1451 
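// Enqueues one decoded buffer. Stale buffers (queued before the last flush) are returned
// to the decoder untouched. The VideoFrameScheduler is created lazily on the first video
// buffer. While the queues are still being synced (mSyncQueues), audio that leads the
// first video frame by more than 0.1 sec is dropped so both streams start roughly aligned.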
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone_l();
}

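// Ends the initial queue-sync phase and kicks both drain paths. Caller must hold mLock
// (the _l suffix convention); the lock is dropped briefly around postDrainVideoQueue().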
void NuPlayer::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}

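// Queues an end-of-stream marker: an entry with no buffer whose mFinalResult carries the
// EOS status. If the marked queue is still empty while syncing, syncing is ended first
// (see the starvation note in onFlush) so the drain logic can deliver the EOS.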
void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}

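// Flushes one stream: returns all queued buffers to the decoder, bumps the drain (and,
// for audio, EOS) generations so in-flight drain/EOS messages become no-ops, clears the
// time anchors, and resets the AudioSink. notifyFlushComplete() is sent only when the
// corresponding mNotifyComplete flag was set.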
void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}

void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
    while (!queue->empty()) {
        QueueEntry *entry = &*queue->begin();

        if (entry->mBuffer != NULL) {
            entry->mNotifyConsumed->post();
        } else if (entry->mNotifyConsumed != nullptr) {
            // Does the audio sink need to be opened now?
            onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
        }

        queue->erase(queue->begin());
        entry = NULL;
    }
}

void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatFlushComplete);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->post();
}

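// Returns true (and returns the buffer to the decoder via notifyConsumed) when the
// message was queued under an older queue generation, i.e. before the most recent
// flush of that stream.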
bool NuPlayer::Renderer::dropBufferIfStale(
        bool audio, const sp<AMessage> &msg) {
    int32_t queueGeneration;
    CHECK(msg->findInt32("queueGeneration", &queueGeneration));

    if (queueGeneration == getQueueGeneration(audio)) {
        return false;
    }

    sp<AMessage> notifyConsumed;
    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
        notifyConsumed->post();
    }

    return true;
}

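// Called after the (non-offloaded) AudioSink has been reopened: re-reads the sink's
// frames-written counter and invalidates the frame-count anchor, presumably so position
// computation restarts cleanly from the new sink.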
void NuPlayer::Renderer::onAudioSinkChanged() {
    if (offloadingAudio()) {
        return;
    }
    CHECK(!mDrainAudioQueuePending);
    mNumFramesWritten = 0;
    mAnchorNumFramesWritten = -1;
    uint32_t written;
    if (mAudioSink->getFramesWritten(&written) == OK) {
        mNumFramesWritten = written;
    }
}

void NuPlayer::Renderer::onDisableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags &= ~FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

void NuPlayer::Renderer::onEnableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags |= FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

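// Pauses rendering: the media clock rate drops to 0 and the video drain generation is
// bumped, while the audio drain generation is left alone so the AudioSink buffer can keep
// filling while paused. Also pauses the sink and arms the offload pause timeout.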
void NuPlayer::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // We do not increment the audio drain generation so that the audio buffer
        // keeps filling during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}

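// Resumes rendering: cancels the offload pause timeout, restarts the AudioSink if it is
// ready, restores the playback rate on both the sink and the media clock, and re-posts
// drains for any queued data.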
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // Configure the AudioSink, as we did not do it when pausing.
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}

void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
    if (mVideoScheduler == NULL) {
        mVideoScheduler = new VideoFrameScheduler();
    }
    mVideoScheduler->init(fps);
}

int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
}

int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
}

bool NuPlayer::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}

void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}

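// While offloading audio, pausing arms a kWhatAudioOffloadPauseTimeout message delayed
// by kOffloadPauseMaxUs; the wakelock keeps the device awake long enough for that
// message to fire (or be cancelled on resume).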
void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->acquire();
        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
        msg->post(kOffloadPauseMaxUs);
    }
}

void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
    // We may have called startAudioOffloadPauseTimeout() without
    // the AudioSink open and with offloadingAudio enabled.
    //
    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
    // we always release the wakelock and increment the pause timeout generation.
    //
    // Note: The acquired wakelock prevents the device from suspending
    // immediately after offload pause (in case a resume happens shortly thereafter).
    mWakeLock->release(true);
    ++mAudioOffloadPauseTimeoutGeneration;
}

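// Opens (or reconfigures) the AudioSink. When offload is requested, the compressed format
// is tried first and a failure falls back to PCM unless offloadOnly is set, in which case
// an audio teardown (kForceNonOffload) is requested instead. Reopening is skipped when the
// offload/PCM configuration is unchanged. PCM output is currently fixed to 16-bit
// (see the TODO below).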
status_t NuPlayer::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bitrate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine AAC format as per aac profile
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = isStreaming;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
                if (offloadOnly) {
                    notifyAudioTearDown(kForceNonOffload);
                }
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;

        // The doNotReconnect flag means the AudioSink will signal back and let NuPlayer
        // re-construct the AudioSink. We don't want this when there's video because it
        // would cause a video seek to the previous I frame. But we do want it when there's
        // only audio because it gives NuPlayer a chance to switch from non-offload mode
        // to offload mode.
        // So we only set doNotReconnect when there's no video.
        const bool doNotReconnect = !hasVideo;

        // We should always be able to set our playback settings if the sink is closed.
        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
                "onOpenAudioSink: can't set playback rate on closed sink");
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    doNotReconnect,
                    frameCount);
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}

void NuPlayer::Renderer::onCloseAudioSink() {
    mAudioSink->close();
    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}

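// Applies a mid-stream audio format change: unpacks the format and sink parameters from
// the meta message, reopens the sink via onOpenAudioSink(), and posts the supplied notify
// message (with an "err" field on failure) so the caller can proceed.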
void NuPlayer::Renderer::onChangeAudioFormat(
        const sp<AMessage> &meta, const sp<AMessage> &notify) {
    sp<AMessage> format;
    CHECK(meta->findMessage("format", &format));

    int32_t offloadOnly;
    CHECK(meta->findInt32("offload-only", &offloadOnly));

    int32_t hasVideo;
    CHECK(meta->findInt32("has-video", &hasVideo));

    uint32_t flags;
    CHECK(meta->findInt32("flags", (int32_t *)&flags));

    uint32_t isStreaming;
    CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));

    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

    if (err != OK) {
        notify->setInt32("err", err);
    }
    notify->post();
}

}  // namespace android