/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayerRenderer"
#include <utils/Log.h>

#include "NuPlayerRenderer.h"
#include <cutils/properties.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>

#include <inttypes.h>

namespace android {

/*
 * Example of common configuration settings in shell script form

   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
   adb shell setprop audio.offload.disable 1

   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
   adb shell setprop audio.offload.video 1

   #Use audio callbacks for PCM data
   adb shell setprop media.stagefright.audio.cbk 1

   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
   adb shell setprop media.stagefright.audio.deep 1

   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
   adb shell setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */

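/*
 * Helpers for reading the properties described above. Per the note above, the
 * values are picked up for the next track played, not the one currently playing.
 *
 * To inspect the current values from a shell (illustrative only):
 *
 *   adb shell getprop media.stagefright.audio.cbk
 *   adb shell getprop media.stagefright.audio.sink
 */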
static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;

// static
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;

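// The public entry points below (queueBuffer, flush, pause, resume, ...) post
// AMessages to the renderer's looper thread; the corresponding on...() handlers
// invoked from onMessageReceived() do the actual work and own most of the state.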
NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    mMediaClock = new MediaClock;
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}

NuPlayer::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }
}

void NuPlayer::Renderer::queueBuffer(
        bool audio,
        const sp<ABuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setBuffer("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

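// Applies a playback-rate change on the renderer thread. A speed of 0 is treated
// as a pause request; any other rate is pushed to the AudioSink (when it is ready)
// and mirrored into the MediaClock.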
status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // Don't call the audiosink's setPlaybackRate when pausing, as the pitch does not
        // have to correspond to any non-zero speed (e.g. the old speed). Keep the
        // settings nonetheless, using the old speed, in case the audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}

status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // Use the playback settings reported by the audiosink, as they may be
            // slightly off because the audiosink does not apply small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}

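// Bumps the queue and drain generations under mLock so that any in-flight messages
// referring to the old data are ignored, then posts kWhatFlush so the queues are
// actually emptied on the renderer thread.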
void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        clearAnchorTime_l();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

void NuPlayer::Renderer::signalTimeDiscontinuity() {
}

void NuPlayer::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

void NuPlayer::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

void NuPlayer::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Can be called from any thread.
status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}

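// Anchor-time bookkeeping. The _l suffix indicates that the caller must already
// hold mLock.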
void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

void NuPlayer::Renderer::clearAnchorTime_l() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

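// Synchronously asks the renderer thread to open or close the audio sink.
// openAudioSink() reports back (via *isOffloaded) whether the sink ended up
// in offload mode.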
status_t NuPlayer::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);

    int32_t err;
    if (!response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

void NuPlayer::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                uint32_t numFramesPendingPlayout =
                    mNumFramesWritten - numFramesPlayed;

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs / 2);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            onAudioTearDown(kDueToError);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

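// Schedules a kWhatDrainAudioQueue message unless a drain is already pending, the
// queues are still being synced, or the audio-callback path is in use. While paused,
// delivery is deferred until mPauseDrainAudioAllowedUs.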
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

// static
size_t NuPlayer::Renderer::AudioSinkCallback(
        MediaPlayerBase::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayerBase::AudioSink::cb_event_t event) {
    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;

    switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown();
            break;
        }
    }

    return 0;
}

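// Audio-callback path: invoked (via AudioSinkCallback) when the AudioSink asks for
// more PCM data. Copies as much queued audio as fits into the sink's buffer and
// updates the media clock from the played-out duration.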
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // For non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames-written count gives
    // an estimate of the pending played-out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

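// Finds the last entry in the audio queue that marks EOS (a NULL buffer or an "eos"
// flag on notifyConsumed), replies to every earlier entry (dropping its data) and
// posts the EOS notifications. Used when the audio sink cannot accept data; see
// onDrainAudioQueue().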
void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if (entry->mBuffer == NULL
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == NULL) {
                // delay doesn't matter as we don't even have an AudioTrack
                notifyEOS(true /* audio */, it->mFinalResult);
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}

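// Non-callback drain path: writes as much queued audio as the sink accepts in
// non-blocking mode, advances the media clock bookkeeping, and returns true if
// another drain should be scheduled.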
bool NuPlayer::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why AudioTrack is not created when
    // this function is called. One possible reason could be leftover
    // audio. Another place to check is whether the decoder has received
    // INFO_FORMAT_CHANGED as its first buffer, since AudioSink is opened
    // there, and possible interactions with flush immediately after start.
    // Investigate the error message "vorbis_dsp_synthesis returned -135",
    // along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayerDecoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        if (entry->mBuffer == NULL) {
            // EOS
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        // Ignore a 0-sized buffer, which could be an EOS marker with no data.
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer will handle the reconnect.
                notifyAudioTearDown();
            }
            break;
        }

        entry->mOffset += written;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

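// Converts a frame count into a duration in microseconds at the current track's
// sample rate (offloaded or PCM), i.e. the playout time at normal (1.0x) speed.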
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
}

int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If we fail to get the current position, e.g. because the audio clock
        // is not ready, just play out the video immediately without delay.
        return nowUs;
    }
    return realUs;
}

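// Updates the media clock anchor from a newly drained audio timestamp: "now" in
// media time is the buffer's timestamp minus the audio still pending in the sink.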
void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
    int64_t nowUs = ALooper::GetNowUs();
    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}

// Called without mLock acquired.
void NuPlayer::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t delayUs;
    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
        realTimeUs = mediaTimeUs;
    } else {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        {
            Mutex::Autolock autoLock(mLock);
            if (mAnchorTimeMediaUs < 0) {
                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                mAnchorTimeMediaUs = mediaTimeUs;
                realTimeUs = nowUs;
            } else {
                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
            }
        }
        if (!mHasAudio) {
            // smooth out videos >= 10fps
            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
        }

        // Heuristic to handle the situation where the media time changed without
        // a discontinuity. If we have not drained an audio buffer that was
        // received after this buffer, repost in 10 msec. Otherwise repost
        // in 500 msec.
        delayUs = realTimeUs - nowUs;
        if (delayUs > 500000) {
            int64_t postDelayUs = 500000;
            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                postDelayUs = 10000;
            }
            msg->setWhat(kWhatPostDrainVideoQueue);
            msg->post(postDelayUs);
            mVideoScheduler->restart();
            ALOGI("possible video time jump of %dms, retrying in %dms",
                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
            mDrainVideoQueuePending = true;
            return;
        }
    }

    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

    delayUs = realTimeUs - nowUs;

    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
    // post 2 display refreshes before rendering is due
    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

    mDrainVideoQueuePending = true;
}

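// Renders (or drops) the video frame at the head of the queue: computes how late the
// frame is and, if it is more than 40ms late, tells the decoder not to render it.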
void NuPlayer::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = -1;
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        int64_t mediaTimeUs;
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        nowUs = ALooper::GetNowUs();
        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }

    bool tooLate = false;

    if (!mPaused) {
        if (nowUs == -1) {
            nowUs = ALooper::GetNowUs();
        }
        setVideoLateByUs(nowUs - realTimeUs);
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            Mutex::Autolock autoLock(mLock);
            clearAnchorTime_l();
        }
    }

    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}

void NuPlayer::Renderer::notifyVideoRenderingStart() {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatVideoRenderingStart);
    notify->post();
}

void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);
}

void NuPlayer::Renderer::notifyAudioTearDown() {
    (new AMessage(kWhatAudioTearDown, this))->post();
}

void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<ABuffer> buffer;
    CHECK(msg->findBuffer("buffer", &buffer));

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone_l();
}

void NuPlayer::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}

void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}

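// Handler for kWhatFlush: releases every queued buffer back to the decoder, bumps
// the drain generation, and (for audio) pauses/flushes the AudioSink so stale data
// is not played out.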
void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
        clearAnchorTime_l();
    }

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            ++mAudioDrainGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}

void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
    while (!queue->empty()) {
        QueueEntry *entry = &*queue->begin();

        if (entry->mBuffer != NULL) {
            entry->mNotifyConsumed->post();
        }

        queue->erase(queue->begin());
        entry = NULL;
    }
}

void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatFlushComplete);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->post();
}

bool NuPlayer::Renderer::dropBufferIfStale(
        bool audio, const sp<AMessage> &msg) {
    int32_t queueGeneration;
    CHECK(msg->findInt32("queueGeneration", &queueGeneration));

    if (queueGeneration == getQueueGeneration(audio)) {
        return false;
    }

    sp<AMessage> notifyConsumed;
    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
        notifyConsumed->post();
    }

    return true;
}

void NuPlayer::Renderer::onAudioSinkChanged() {
    if (offloadingAudio()) {
        return;
    }
    CHECK(!mDrainAudioQueuePending);
    mNumFramesWritten = 0;
    {
        Mutex::Autolock autoLock(mLock);
        mAnchorNumFramesWritten = -1;
    }
    uint32_t written;
    if (mAudioSink->getFramesWritten(&written) == OK) {
        mNumFramesWritten = written;
    }
}

void NuPlayer::Renderer::onDisableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags &= ~FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

void NuPlayer::Renderer::onEnableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags |= FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

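// Pause leaves the audio drain generation unchanged so the audio buffer can keep
// filling while paused; video draining is cancelled and the MediaClock is frozen
// by setting its rate to 0.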
onPause()1470 void NuPlayer::Renderer::onPause() {
1471     if (mPaused) {
1472         return;
1473     }
1474 
1475     {
1476         Mutex::Autolock autoLock(mLock);
1477         // we do not increment audio drain generation so that we fill audio buffer during pause.
1478         ++mVideoDrainGeneration;
1479         prepareForMediaRenderingStart_l();
1480         mPaused = true;
1481         mMediaClock->setPlaybackRate(0.0);
1482     }
1483 
1484     mDrainAudioQueuePending = false;
1485     mDrainVideoQueuePending = false;
1486 
1487     if (mHasAudio) {
1488         mAudioSink->pause();
1489         startAudioOffloadPauseTimeout();
1490     }
1491 
1492     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1493           mAudioQueue.size(), mVideoQueue.size());
1494 }
1495 
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    if (mHasAudio) {
        cancelAudioOffloadPauseTimeout();
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown();
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // The rendering-started message may have been delayed while we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // Configure the AudioSink here, since we did not do it when pausing.
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}

void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
    if (mVideoScheduler == NULL) {
        mVideoScheduler = new VideoFrameScheduler();
    }
    mVideoScheduler->init(fps);
}

int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
}

int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
}

bool NuPlayer::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}

// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
// as it acquires locks and may query the audio driver.
//
// Some calls could conceivably retrieve extrapolated data instead of
// accessing getTimestamp() or getPosition() every time a data buffer with
// a media time is received.
//
// Calculate the duration of the played samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
    uint32_t numFramesPlayed;
    int64_t numFramesPlayedAt;
    AudioTimestamp ts;
    static const int64_t kStaleTimestamp100ms = 100000;

    status_t res = mAudioSink->getTimestamp(ts);
    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
        numFramesPlayed = ts.mPosition;
        numFramesPlayedAt =
            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
        const int64_t timestampAge = nowUs - numFramesPlayedAt;
        if (timestampAge > kStaleTimestamp100ms) {
            // Audio FIXME:
            // getTimestamp() returns a timestamp which may come from audio mixing threads.
            // After pausing, the MixerThread may go idle, so the mTime estimate may
            // become stale. Assuming the MixerThread runs every 20ms, with FastMixer at 5ms,
            // the max latency should be about 25ms with an average around 12ms (to be verified).
            // For safety we use 100ms.
            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
                    (long long)nowUs, (long long)numFramesPlayedAt);
            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
        }
        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
        numFramesPlayed = 0;
        numFramesPlayedAt = nowUs;
        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
        //        numFramesPlayed, (long long)numFramesPlayedAt);
    } else {                         // case 3: transitory at new track or audio fast tracks.
        res = mAudioSink->getPosition(&numFramesPlayed);
        CHECK_EQ(res, (status_t)OK);
        numFramesPlayedAt = nowUs;
        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    }

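    // The played-out duration is the frame count converted to time at the sink
    // sample rate (via getDurationUsIfPlayedAtSampleRate()), extrapolated by the
    // wall-clock time elapsed since that frame count was observed. For example,
    // 48000 frames at 48 kHz observed 10 ms ago corresponds to roughly
    // 1000000 + 10000 = 1010000 us.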
    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
            + nowUs - numFramesPlayedAt;
    if (durationUs < 0) {
        // Occurs when the numFramesPlayed position is very small and either:
        // (1) In case 1, nowUs was computed before getTimestamp() was called, so
        //     numFramesPlayedAt exceeds nowUs by more than the duration of numFramesPlayed.
        // (2) In case 3, adding mAudioSink->latency() pushed numFramesPlayedAt past
        //     nowUs by more than the duration of numFramesPlayed.
        //
        // Both of these are transitory conditions.
        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
        durationUs = 0;
    }
    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
    return durationUs;
}

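// Tear the audio path down at most once (guarded by mAudioTornDown): capture
// the current position (if available), stop and flush the sink, and post
// kWhatAudioTearDown with the reason for the teardown.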
void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}

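// While paused in offload mode, hold a wake lock and schedule a
// kWhatAudioOffloadPauseTimeout message after kOffloadPauseMaxUs; cancelling
// releases the lock and bumps the generation so a stale timeout is ignored.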
void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->acquire();
        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
        msg->post(kOffloadPauseMaxUs);
    }
}

void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->release(true);
        ++mAudioOffloadPauseTimeoutGeneration;
    }
}

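// Open the AudioSink either in compressed offload mode or in PCM mode. When
// offloading, a failed open falls back to PCM unless offloadOnly was requested.
// Returns OK if the sink configuration is unchanged or was (re)opened
// successfully; if the sink actually changed, onAudioSinkChanged() resets the
// frame bookkeeping.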
status_t NuPlayer::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // Signal to the AudioSink to derive the mask from the channel count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bit-rate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine the AAC format per the AAC profile.
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = true;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // No change from the previous configuration, everything ok.
                return OK;
            }
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non-offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // No change from the previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
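        // e.g. a 48000 Hz stream with a 500 ms sink setting yields
        // 48000 * 500 / 1000 = 24000 frames.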

        // If doNotReconnect is set, the AudioSink will signal back and let NuPlayer
        // re-construct the AudioSink. We don't want this when there's video, because
        // it would cause a video seek to the previous I-frame. But we do want it when
        // there's only audio, because it gives NuPlayer a chance to switch from
        // non-offload mode to offload mode.
        // So we only set doNotReconnect when there's no video.
        const bool doNotReconnect = !hasVideo;
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    doNotReconnect,
                    frameCount);
        if (err == OK) {
            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
        }
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}

void NuPlayer::Renderer::onCloseAudioSink() {
    mAudioSink->close();
    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}

}  // namespace android