1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
19 //#define LOG_NDEBUG 0
20 #define LOG_TAG "AudioTrack"
21 
22 #include <sys/resource.h>
23 #include <audio_utils/primitives.h>
24 #include <binder/IPCThreadState.h>
25 #include <media/AudioTrack.h>
26 #include <utils/Log.h>
27 #include <private/media/AudioTrackShared.h>
28 #include <media/IAudioFlinger.h>
29 
30 #define WAIT_PERIOD_MS                  10
31 #define WAIT_STREAM_END_TIMEOUT_SEC     120
32 
33 
34 namespace android {
35 // ---------------------------------------------------------------------------
36 
37 // static
38 status_t AudioTrack::getMinFrameCount(
39         size_t* frameCount,
40         audio_stream_type_t streamType,
41         uint32_t sampleRate)
42 {
43     if (frameCount == NULL) {
44         return BAD_VALUE;
45     }
46 
47     // default to 0 in case of error
48     *frameCount = 0;
49 
50     // FIXME merge with similar code in createTrack_l(), except we're missing
51     //       some information here that is available in createTrack_l():
52     //          audio_io_handle_t output
53     //          audio_format_t format
54     //          audio_channel_mask_t channelMask
55     //          audio_output_flags_t flags
56     uint32_t afSampleRate;
57     if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58         return NO_INIT;
59     }
60     size_t afFrameCount;
61     if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62         return NO_INIT;
63     }
64     uint32_t afLatency;
65     if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66         return NO_INIT;
67     }
68 
69     // Ensure that buffer depth covers at least audio hardware latency
70     uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71     if (minBufCount < 2) {
72         minBufCount = 2;
73     }
74 
75     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76             afFrameCount * minBufCount * sampleRate / afSampleRate;
77     ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78             *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79     return NO_ERROR;
80 }
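// [Editorial note, not part of the original source] A brief usage sketch of
// getMinFrameCount(), assuming typical HAL values (afLatency = 75 ms,
// afFrameCount = 1024, afSampleRate = 44100): one HAL buffer lasts
// (1000 * 1024) / 44100 = 23 ms, so minBufCount = 75 / 23 = 3 and the minimum
// client buffer is 3 * 1024 = 3072 frames at the same sample rate.
//
//     size_t minFrames = 0;
//     status_t res = AudioTrack::getMinFrameCount(&minFrames,
//             AUDIO_STREAM_MUSIC, 44100 /*sampleRate, assumed*/);
//     if (res == NO_ERROR) {
//         // size the track's buffer to at least minFrames
//     }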
81 
82 // ---------------------------------------------------------------------------
83 
84 AudioTrack::AudioTrack()
85     : mStatus(NO_INIT),
86       mIsTimed(false),
87       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88       mPreviousSchedulingGroup(SP_DEFAULT),
89       mPausedPosition(0)
90 {
91 }
92 
93 AudioTrack::AudioTrack(
94         audio_stream_type_t streamType,
95         uint32_t sampleRate,
96         audio_format_t format,
97         audio_channel_mask_t channelMask,
98         int frameCount,
99         audio_output_flags_t flags,
100         callback_t cbf,
101         void* user,
102         int notificationFrames,
103         int sessionId,
104         transfer_type transferType,
105         const audio_offload_info_t *offloadInfo,
106         int uid)
107     : mStatus(NO_INIT),
108       mIsTimed(false),
109       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
110       mPreviousSchedulingGroup(SP_DEFAULT),
111       mPausedPosition(0)
112 {
113     mStatus = set(streamType, sampleRate, format, channelMask,
114             frameCount, flags, cbf, user, notificationFrames,
115             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
116             offloadInfo, uid);
117 }
118 
119 AudioTrack::AudioTrack(
120         audio_stream_type_t streamType,
121         uint32_t sampleRate,
122         audio_format_t format,
123         audio_channel_mask_t channelMask,
124         const sp<IMemory>& sharedBuffer,
125         audio_output_flags_t flags,
126         callback_t cbf,
127         void* user,
128         int notificationFrames,
129         int sessionId,
130         transfer_type transferType,
131         const audio_offload_info_t *offloadInfo,
132         int uid)
133     : mStatus(NO_INIT),
134       mIsTimed(false),
135       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
136       mPreviousSchedulingGroup(SP_DEFAULT),
137       mPausedPosition(0)
138 {
139     mStatus = set(streamType, sampleRate, format, channelMask,
140             0 /*frameCount*/, flags, cbf, user, notificationFrames,
141             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
142 }
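// [Editorial note, not part of the original source] Sketch of constructing a
// streaming AudioTrack with a callback; callback_t and initCheck() are assumed
// to be declared in AudioTrack.h, and the trailing constructor arguments are
// assumed to have default values there.
//
//     static void audioCallback(int event, void* user, void* info) {
//         // handle EVENT_MORE_DATA, EVENT_UNDERRUN, EVENT_MARKER, ...
//     }
//
//     sp<AudioTrack> track = new AudioTrack(
//             AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
//             AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount: use minimum*/,
//             AUDIO_OUTPUT_FLAG_NONE, audioCallback, NULL /*user*/);
//     if (track->initCheck() != NO_ERROR) {
//         // construction failed; see mStatus as set by set() below
//     }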
143 
144 AudioTrack::~AudioTrack()
145 {
146     if (mStatus == NO_ERROR) {
147         // Make sure that callback function exits in the case where
148         // it is looping on buffer full condition in obtainBuffer().
149         // Otherwise the callback thread will never exit.
150         stop();
151         if (mAudioTrackThread != 0) {
152             mProxy->interrupt();
153             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
154             mAudioTrackThread->requestExitAndWait();
155             mAudioTrackThread.clear();
156         }
157         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
158         mAudioTrack.clear();
159         IPCThreadState::self()->flushCommands();
160         AudioSystem::releaseAudioSessionId(mSessionId);
161     }
162 }
163 
164 status_t AudioTrack::set(
165         audio_stream_type_t streamType,
166         uint32_t sampleRate,
167         audio_format_t format,
168         audio_channel_mask_t channelMask,
169         int frameCountInt,
170         audio_output_flags_t flags,
171         callback_t cbf,
172         void* user,
173         int notificationFrames,
174         const sp<IMemory>& sharedBuffer,
175         bool threadCanCallJava,
176         int sessionId,
177         transfer_type transferType,
178         const audio_offload_info_t *offloadInfo,
179         int uid)
180 {
181     switch (transferType) {
182     case TRANSFER_DEFAULT:
183         if (sharedBuffer != 0) {
184             transferType = TRANSFER_SHARED;
185         } else if (cbf == NULL || threadCanCallJava) {
186             transferType = TRANSFER_SYNC;
187         } else {
188             transferType = TRANSFER_CALLBACK;
189         }
190         break;
191     case TRANSFER_CALLBACK:
192         if (cbf == NULL || sharedBuffer != 0) {
193             ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
194             return BAD_VALUE;
195         }
196         break;
197     case TRANSFER_OBTAIN:
198     case TRANSFER_SYNC:
199         if (sharedBuffer != 0) {
200             ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
201             return BAD_VALUE;
202         }
203         break;
204     case TRANSFER_SHARED:
205         if (sharedBuffer == 0) {
206             ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
207             return BAD_VALUE;
208         }
209         break;
210     default:
211         ALOGE("Invalid transfer type %d", transferType);
212         return BAD_VALUE;
213     }
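    // [Editorial note, not part of the original source] With TRANSFER_DEFAULT, the
    // effective mode resolved by the switch above is:
    //   sharedBuffer != 0                        -> TRANSFER_SHARED   (static track)
    //   no callback, or callback may call Java   -> TRANSFER_SYNC     (blocking write())
    //   native callback provided                 -> TRANSFER_CALLBACK (filled via EVENT_MORE_DATA)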
214     mTransfer = transferType;
215 
216     // FIXME "int" here is legacy and will be replaced by size_t later
217     if (frameCountInt < 0) {
218         ALOGE("Invalid frame count %d", frameCountInt);
219         return BAD_VALUE;
220     }
221     size_t frameCount = frameCountInt;
222 
223     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
224             sharedBuffer->size());
225 
226     ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
227 
228     AutoMutex lock(mLock);
229 
230     // invariant that mAudioTrack != 0 is true only after set() returns successfully
231     if (mAudioTrack != 0) {
232         ALOGE("Track already in use");
233         return INVALID_OPERATION;
234     }
235 
236     mOutput = 0;
237 
238     // handle default values first.
239     if (streamType == AUDIO_STREAM_DEFAULT) {
240         streamType = AUDIO_STREAM_MUSIC;
241     }
242 
243     if (sampleRate == 0) {
244         uint32_t afSampleRate;
245         if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
246             return NO_INIT;
247         }
248         sampleRate = afSampleRate;
249     }
250     mSampleRate = sampleRate;
251 
252     // these below should probably come from the audioFlinger too...
253     if (format == AUDIO_FORMAT_DEFAULT) {
254         format = AUDIO_FORMAT_PCM_16_BIT;
255     }
256     if (channelMask == 0) {
257         channelMask = AUDIO_CHANNEL_OUT_STEREO;
258     }
259 
260     // validate parameters
261     if (!audio_is_valid_format(format)) {
262         ALOGE("Invalid format %d", format);
263         return BAD_VALUE;
264     }
265 
266     // AudioFlinger does not currently support 8-bit data in shared memory
267     if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
268         ALOGE("8-bit data in shared memory is not supported");
269         return BAD_VALUE;
270     }
271 
272     // force direct flag if format is not linear PCM
273     // or offload was requested
274     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
275             || !audio_is_linear_pcm(format)) {
276         ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
277                     ? "Offload request, forcing to Direct Output"
278                     : "Not linear PCM, forcing to Direct Output");
279         flags = (audio_output_flags_t)
280                 // FIXME why can't we allow direct AND fast?
281                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
282     }
283     // only allow deep buffering for music stream type
284     if (streamType != AUDIO_STREAM_MUSIC) {
285         flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
286     }
287 
288     if (!audio_is_output_channel(channelMask)) {
289         ALOGE("Invalid channel mask %#x", channelMask);
290         return BAD_VALUE;
291     }
292     mChannelMask = channelMask;
293     uint32_t channelCount = popcount(channelMask);
294     mChannelCount = channelCount;
295 
296     if (audio_is_linear_pcm(format)) {
297         mFrameSize = channelCount * audio_bytes_per_sample(format);
298         mFrameSizeAF = channelCount * sizeof(int16_t);
299     } else {
300         mFrameSize = sizeof(uint8_t);
301         mFrameSizeAF = sizeof(uint8_t);
302     }
303 
304     audio_io_handle_t output = AudioSystem::getOutput(
305                                     streamType,
306                                     sampleRate, format, channelMask,
307                                     flags,
308                                     offloadInfo);
309 
310     if (output == 0) {
311         ALOGE("Could not get audio output for stream type %d", streamType);
312         return BAD_VALUE;
313     }
314 
315     mVolume[LEFT] = 1.0f;
316     mVolume[RIGHT] = 1.0f;
317     mSendLevel = 0.0f;
318     mFrameCount = frameCount;
319     mReqFrameCount = frameCount;
320     mNotificationFramesReq = notificationFrames;
321     mNotificationFramesAct = 0;
322     mSessionId = sessionId;
323     if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
324         mClientUid = IPCThreadState::self()->getCallingUid();
325     } else {
326         mClientUid = uid;
327     }
328     mAuxEffectId = 0;
329     mFlags = flags;
330     mCbf = cbf;
331 
332     if (cbf != NULL) {
333         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
334         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
335     }
336 
337     // create the IAudioTrack
338     status_t status = createTrack_l(streamType,
339                                   sampleRate,
340                                   format,
341                                   frameCount,
342                                   flags,
343                                   sharedBuffer,
344                                   output,
345                                   0 /*epoch*/);
346 
347     if (status != NO_ERROR) {
348         if (mAudioTrackThread != 0) {
349             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
350             mAudioTrackThread->requestExitAndWait();
351             mAudioTrackThread.clear();
352         }
353         // Use of direct and offloaded output streams is ref counted by audio policy manager.
354         // As getOutput was called above and resulted in an output stream to be opened,
355         // we need to release it.
356         AudioSystem::releaseOutput(output);
357         return status;
358     }
359 
360     mStatus = NO_ERROR;
361     mStreamType = streamType;
362     mFormat = format;
363     mSharedBuffer = sharedBuffer;
364     mState = STATE_STOPPED;
365     mUserData = user;
366     mLoopPeriod = 0;
367     mMarkerPosition = 0;
368     mMarkerReached = false;
369     mNewPosition = 0;
370     mUpdatePeriod = 0;
371     AudioSystem::acquireAudioSessionId(mSessionId);
372     mSequence = 1;
373     mObservedSequence = mSequence;
374     mInUnderrun = false;
375     mOutput = output;
376 
377     return NO_ERROR;
378 }
379 
380 // -------------------------------------------------------------------------
381 
382 status_t AudioTrack::start()
383 {
384     AutoMutex lock(mLock);
385 
386     if (mState == STATE_ACTIVE) {
387         return INVALID_OPERATION;
388     }
389 
390     mInUnderrun = true;
391 
392     State previousState = mState;
393     if (previousState == STATE_PAUSED_STOPPING) {
394         mState = STATE_STOPPING;
395     } else {
396         mState = STATE_ACTIVE;
397     }
398     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
399         // reset current position as seen by client to 0
400         mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
401         // force refresh of remaining frames by processAudioBuffer() as last
402         // write before stop could be partial.
403         mRefreshRemaining = true;
404     }
405     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
406     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
407 
408     sp<AudioTrackThread> t = mAudioTrackThread;
409     if (t != 0) {
410         if (previousState == STATE_STOPPING) {
411             mProxy->interrupt();
412         } else {
413             t->resume();
414         }
415     } else {
416         mPreviousPriority = getpriority(PRIO_PROCESS, 0);
417         get_sched_policy(0, &mPreviousSchedulingGroup);
418         androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
419     }
420 
421     status_t status = NO_ERROR;
422     if (!(flags & CBLK_INVALID)) {
423         status = mAudioTrack->start();
424         if (status == DEAD_OBJECT) {
425             flags |= CBLK_INVALID;
426         }
427     }
428     if (flags & CBLK_INVALID) {
429         status = restoreTrack_l("start");
430     }
431 
432     if (status != NO_ERROR) {
433         ALOGE("start() status %d", status);
434         mState = previousState;
435         if (t != 0) {
436             if (previousState != STATE_STOPPING) {
437                 t->pause();
438             }
439         } else {
440             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
441             set_sched_policy(0, mPreviousSchedulingGroup);
442         }
443     }
444 
445     return status;
446 }
447 
448 void AudioTrack::stop()
449 {
450     AutoMutex lock(mLock);
451     // FIXME pause then stop should not be a nop
452     if (mState != STATE_ACTIVE) {
453         return;
454     }
455 
456     if (isOffloaded()) {
457         mState = STATE_STOPPING;
458     } else {
459         mState = STATE_STOPPED;
460     }
461 
462     mProxy->interrupt();
463     mAudioTrack->stop();
464     // the playback head position will reset to 0, so if a marker is set, we need
465     // to activate it again
466     mMarkerReached = false;
467 #if 0
468     // Force flush if a shared buffer is used otherwise audioflinger
469     // will not stop before end of buffer is reached.
470     // It may be needed to make sure that we stop playback, likely in case looping is on.
471     if (mSharedBuffer != 0) {
472         flush_l();
473     }
474 #endif
475 
476     sp<AudioTrackThread> t = mAudioTrackThread;
477     if (t != 0) {
478         if (!isOffloaded()) {
479             t->pause();
480         }
481     } else {
482         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
483         set_sched_policy(0, mPreviousSchedulingGroup);
484     }
485 }
486 
487 bool AudioTrack::stopped() const
488 {
489     AutoMutex lock(mLock);
490     return mState != STATE_ACTIVE;
491 }
492 
493 void AudioTrack::flush()
494 {
495     if (mSharedBuffer != 0) {
496         return;
497     }
498     AutoMutex lock(mLock);
499     if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
500         return;
501     }
502     flush_l();
503 }
504 
505 void AudioTrack::flush_l()
506 {
507     ALOG_ASSERT(mState != STATE_ACTIVE);
508 
509     // clear playback marker and periodic update counter
510     mMarkerPosition = 0;
511     mMarkerReached = false;
512     mUpdatePeriod = 0;
513     mRefreshRemaining = true;
514 
515     mState = STATE_FLUSHED;
516     if (isOffloaded()) {
517         mProxy->interrupt();
518     }
519     mProxy->flush();
520     mAudioTrack->flush();
521 }
522 
523 void AudioTrack::pause()
524 {
525     AutoMutex lock(mLock);
526     if (mState == STATE_ACTIVE) {
527         mState = STATE_PAUSED;
528     } else if (mState == STATE_STOPPING) {
529         mState = STATE_PAUSED_STOPPING;
530     } else {
531         return;
532     }
533     mProxy->interrupt();
534     mAudioTrack->pause();
535 
536     if (isOffloaded()) {
537         if (mOutput != 0) {
538             uint32_t halFrames;
539             // OffloadThread sends the HAL pause in its threadLoop, so the time saved
540             // here can be slightly off
541             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
542             ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
543         }
544     }
545 }
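// [Editorial note, not part of the original source] Typical streaming lifecycle
// against the state machine above, assuming a track created in TRANSFER_SYNC mode:
//
//     track->start();                    // STATE_ACTIVE
//     track->write(pcm, sizeof(pcm));    // blocking writes while active
//     track->pause();                    // STATE_PAUSED (position is cached if offloaded)
//     track->start();                    // resume
//     track->stop();                     // STATE_STOPPED (STATE_STOPPING if offloaded)
//     track->flush();                    // discard pending data; STATE_FLUSHED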
546 
547 status_t AudioTrack::setVolume(float left, float right)
548 {
549     if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
550         return BAD_VALUE;
551     }
552 
553     AutoMutex lock(mLock);
554     mVolume[LEFT] = left;
555     mVolume[RIGHT] = right;
556 
557     mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
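    // [Editorial note, not part of the original source] The gains are packed as two
    // 4.12 fixed-point values: e.g. left = 0.5 and right = 1.0 give
    // uint16_t(0.5 * 0x1000) = 0x0800 and uint16_t(1.0 * 0x1000) = 0x1000,
    // so the packed word is 0x10000800 (right in the high half, left in the low half).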
558 
559     if (isOffloaded()) {
560         mAudioTrack->signal();
561     }
562     return NO_ERROR;
563 }
564 
565 status_t AudioTrack::setVolume(float volume)
566 {
567     return setVolume(volume, volume);
568 }
569 
570 status_t AudioTrack::setAuxEffectSendLevel(float level)
571 {
572     if (level < 0.0f || level > 1.0f) {
573         return BAD_VALUE;
574     }
575 
576     AutoMutex lock(mLock);
577     mSendLevel = level;
578     mProxy->setSendLevel(level);
579 
580     return NO_ERROR;
581 }
582 
583 void AudioTrack::getAuxEffectSendLevel(float* level) const
584 {
585     if (level != NULL) {
586         *level = mSendLevel;
587     }
588 }
589 
590 status_t AudioTrack::setSampleRate(uint32_t rate)
591 {
592     if (mIsTimed || isOffloaded()) {
593         return INVALID_OPERATION;
594     }
595 
596     uint32_t afSamplingRate;
597     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
598         return NO_INIT;
599     }
600     // Resampler implementation limits input sampling rate to 2 x output sampling rate.
601     if (rate == 0 || rate > afSamplingRate*2 ) {
602         return BAD_VALUE;
603     }
604 
605     AutoMutex lock(mLock);
606     mSampleRate = rate;
607     mProxy->setSampleRate(rate);
608 
609     return NO_ERROR;
610 }
611 
612 uint32_t AudioTrack::getSampleRate() const
613 {
614     if (mIsTimed) {
615         return 0;
616     }
617 
618     AutoMutex lock(mLock);
619 
620     // sample rate can be updated during playback by the offloaded decoder so we need to
621     // query the HAL and update if needed.
622 // FIXME use Proxy return channel to update the rate from server and avoid polling here
623     if (isOffloaded()) {
624         if (mOutput != 0) {
625             uint32_t sampleRate = 0;
626             status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
627             if (status == NO_ERROR) {
628                 mSampleRate = sampleRate;
629             }
630         }
631     }
632     return mSampleRate;
633 }
634 
635 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
636 {
637     if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
638         return INVALID_OPERATION;
639     }
640 
641     if (loopCount == 0) {
642         ;
643     } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
644             loopEnd - loopStart >= MIN_LOOP) {
645         ;
646     } else {
647         return BAD_VALUE;
648     }
649 
650     AutoMutex lock(mLock);
651     // See setPosition() regarding setting parameters such as loop points or position while active
652     if (mState == STATE_ACTIVE) {
653         return INVALID_OPERATION;
654     }
655     setLoop_l(loopStart, loopEnd, loopCount);
656     return NO_ERROR;
657 }
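// [Editorial note, not part of the original source] Sketch of looping a static
// (shared buffer) track; frameCount() is assumed to be the getter declared in
// AudioTrack.h for the value cached here as mFrameCount.
//
//     // staticTrack was created with a sharedBuffer and is currently stopped
//     staticTrack->setLoop(0 /*loopStart*/, staticTrack->frameCount() /*loopEnd*/,
//             -1 /*loopCount: loop indefinitely*/);
//     staticTrack->start();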
658 
659 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
660 {
661     // FIXME If setting a loop also sets position to start of loop, then
662     //       this is correct.  Otherwise it should be removed.
663     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
664     mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
665     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
666 }
667 
668 status_t AudioTrack::setMarkerPosition(uint32_t marker)
669 {
670     // The only purpose of setting marker position is to get a callback
671     if (mCbf == NULL || isOffloaded()) {
672         return INVALID_OPERATION;
673     }
674 
675     AutoMutex lock(mLock);
676     mMarkerPosition = marker;
677     mMarkerReached = false;
678 
679     return NO_ERROR;
680 }
681 
682 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
683 {
684     if (isOffloaded()) {
685         return INVALID_OPERATION;
686     }
687     if (marker == NULL) {
688         return BAD_VALUE;
689     }
690 
691     AutoMutex lock(mLock);
692     *marker = mMarkerPosition;
693 
694     return NO_ERROR;
695 }
696 
697 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
698 {
699     // The only purpose of setting position update period is to get a callback
700     if (mCbf == NULL || isOffloaded()) {
701         return INVALID_OPERATION;
702     }
703 
704     AutoMutex lock(mLock);
705     mNewPosition = mProxy->getPosition() + updatePeriod;
706     mUpdatePeriod = updatePeriod;
707     return NO_ERROR;
708 }
709 
710 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
711 {
712     if (isOffloaded()) {
713         return INVALID_OPERATION;
714     }
715     if (updatePeriod == NULL) {
716         return BAD_VALUE;
717     }
718 
719     AutoMutex lock(mLock);
720     *updatePeriod = mUpdatePeriod;
721 
722     return NO_ERROR;
723 }
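// [Editorial note, not part of the original source] Sketch of the marker and
// periodic position notifications; positions are in frames, and both require a
// callback to have been supplied at set() time.
//
//     track->setMarkerPosition(44100);        // EVENT_MARKER after ~1 s at 44.1 kHz
//     track->setPositionUpdatePeriod(4410);   // EVENT_NEW_POS roughly every 100 ms
//
//     // In the callback, the triggering position is passed via the info pointer
//     // for EVENT_MARKER and EVENT_NEW_POS (see processAudioBuffer() below).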
724 
725 status_t AudioTrack::setPosition(uint32_t position)
726 {
727     if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
728         return INVALID_OPERATION;
729     }
730     if (position > mFrameCount) {
731         return BAD_VALUE;
732     }
733 
734     AutoMutex lock(mLock);
735     // Currently we require that the player is inactive before setting parameters such as position
736     // or loop points.  Otherwise, there could be a race condition: the application could read the
737     // current position, compute a new position or loop parameters, and then set that position or
738     // loop parameters but it would do the "wrong" thing since the position has continued to advance
739     // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
740     // to specify how it wants to handle such scenarios.
741     if (mState == STATE_ACTIVE) {
742         return INVALID_OPERATION;
743     }
744     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
745     mLoopPeriod = 0;
746     // FIXME Check whether loops and setting position are incompatible in old code.
747     // If we use setLoop for both purposes we lose the capability to set the position while looping.
748     mStaticProxy->setLoop(position, mFrameCount, 0);
749 
750     return NO_ERROR;
751 }
752 
753 status_t AudioTrack::getPosition(uint32_t *position) const
754 {
755     if (position == NULL) {
756         return BAD_VALUE;
757     }
758 
759     AutoMutex lock(mLock);
760     if (isOffloaded()) {
761         uint32_t dspFrames = 0;
762 
763         if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
764             ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
765             *position = mPausedPosition;
766             return NO_ERROR;
767         }
768 
769         if (mOutput != 0) {
770             uint32_t halFrames;
771             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
772         }
773         *position = dspFrames;
774     } else {
775         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
776         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
777                 mProxy->getPosition();
778     }
779     return NO_ERROR;
780 }
781 
782 status_t AudioTrack::getBufferPosition(size_t *position)
783 {
784     if (mSharedBuffer == 0 || mIsTimed) {
785         return INVALID_OPERATION;
786     }
787     if (position == NULL) {
788         return BAD_VALUE;
789     }
790 
791     AutoMutex lock(mLock);
792     *position = mStaticProxy->getBufferPosition();
793     return NO_ERROR;
794 }
795 
796 status_t AudioTrack::reload()
797 {
798     if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
799         return INVALID_OPERATION;
800     }
801 
802     AutoMutex lock(mLock);
803     // See setPosition() regarding setting parameters such as loop points or position while active
804     if (mState == STATE_ACTIVE) {
805         return INVALID_OPERATION;
806     }
807     mNewPosition = mUpdatePeriod;
808     mLoopPeriod = 0;
809     // FIXME The new code cannot reload while keeping a loop specified.
810     // Need to check how the old code handled this, and whether it's a significant change.
811     mStaticProxy->setLoop(0, mFrameCount, 0);
812     return NO_ERROR;
813 }
814 
815 audio_io_handle_t AudioTrack::getOutput()
816 {
817     AutoMutex lock(mLock);
818     return mOutput;
819 }
820 
821 // must be called with mLock held
822 audio_io_handle_t AudioTrack::getOutput_l()
823 {
824     if (mOutput) {
825         return mOutput;
826     } else {
827         return AudioSystem::getOutput(mStreamType,
828                                       mSampleRate, mFormat, mChannelMask, mFlags);
829     }
830 }
831 
832 status_t AudioTrack::attachAuxEffect(int effectId)
833 {
834     AutoMutex lock(mLock);
835     status_t status = mAudioTrack->attachAuxEffect(effectId);
836     if (status == NO_ERROR) {
837         mAuxEffectId = effectId;
838     }
839     return status;
840 }
841 
842 // -------------------------------------------------------------------------
843 
844 // must be called with mLock held
845 status_t AudioTrack::createTrack_l(
846         audio_stream_type_t streamType,
847         uint32_t sampleRate,
848         audio_format_t format,
849         size_t frameCount,
850         audio_output_flags_t flags,
851         const sp<IMemory>& sharedBuffer,
852         audio_io_handle_t output,
853         size_t epoch)
854 {
855     status_t status;
856     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
857     if (audioFlinger == 0) {
858         ALOGE("Could not get audioflinger");
859         return NO_INIT;
860     }
861 
862     // Not all of these values are needed under all conditions, but it is easier to get them all
863 
864     uint32_t afLatency;
865     status = AudioSystem::getLatency(output, streamType, &afLatency);
866     if (status != NO_ERROR) {
867         ALOGE("getLatency(%d) failed status %d", output, status);
868         return NO_INIT;
869     }
870 
871     size_t afFrameCount;
872     status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
873     if (status != NO_ERROR) {
874         ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
875         return NO_INIT;
876     }
877 
878     uint32_t afSampleRate;
879     status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
880     if (status != NO_ERROR) {
881         ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
882         return NO_INIT;
883     }
884 
885     // Client decides whether the track is TIMED (see below), but can only express a preference
886     // for FAST.  Server will perform additional tests.
887     if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
888             // either of these use cases:
889             // use case 1: shared buffer
890             (sharedBuffer != 0) ||
891             // use case 2: callback handler
892             (mCbf != NULL))) {
893         ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
894         // once denied, do not request again if IAudioTrack is re-created
895         flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
896         mFlags = flags;
897     }
898     ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
899 
900     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
901     //  n = 1   fast track with single buffering; nBuffering is ignored
902     //  n = 2   fast track with double buffering
903     //  n = 2   normal track, no sample rate conversion
904     //  n = 3   normal track, with sample rate conversion
905     //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
906     //  n > 3   very high latency or very small notification interval; nBuffering is ignored
907     const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
908 
909     mNotificationFramesAct = mNotificationFramesReq;
910 
911     if (!audio_is_linear_pcm(format)) {
912 
913         if (sharedBuffer != 0) {
914             // Same comment as below about ignoring frameCount parameter for set()
915             frameCount = sharedBuffer->size();
916         } else if (frameCount == 0) {
917             frameCount = afFrameCount;
918         }
919         if (mNotificationFramesAct != frameCount) {
920             mNotificationFramesAct = frameCount;
921         }
922     } else if (sharedBuffer != 0) {
923 
924         // Ensure that buffer alignment matches channel count
925         // 8-bit data in shared memory is not currently supported by AudioFlinger
926         size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
927         if (mChannelCount > 1) {
928             // More than 2 channels does not require stronger alignment than stereo
929             alignment <<= 1;
930         }
931         if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
932             ALOGE("Invalid buffer alignment: address %p, channel count %u",
933                     sharedBuffer->pointer(), mChannelCount);
934             return BAD_VALUE;
935         }
936 
937         // When initializing a shared buffer AudioTrack via constructors,
938         // there's no frameCount parameter.
939         // But when initializing a shared buffer AudioTrack via set(),
940         // there _is_ a frameCount parameter.  We silently ignore it.
941         frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
942 
943     } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
944 
945         // FIXME move these calculations and associated checks to server
946 
947         // Ensure that buffer depth covers at least audio hardware latency
948         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
949         ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
950                 afFrameCount, minBufCount, afSampleRate, afLatency);
951         if (minBufCount <= nBuffering) {
952             minBufCount = nBuffering;
953         }
954 
955         size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
956         ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
957                 ", afLatency=%d",
958                 minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
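        // [Editorial note, not part of the original source] Worked example with assumed
        // HAL values afLatency = 160 ms, afFrameCount = 960, afSampleRate = 48000 and a
        // client sampleRate of 48000: one HAL buffer lasts (1000 * 960) / 48000 = 20 ms,
        // so minBufCount = 160 / 20 = 8 (already > nBuffering) and
        // minFrameCount = (960 * 48000 * 8) / 48000 = 7680 frames.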
959 
960         if (frameCount == 0) {
961             frameCount = minFrameCount;
962         } else if (frameCount < minFrameCount) {
963             // not ALOGW because it happens all the time when playing key clicks over A2DP
964             ALOGV("Minimum buffer size corrected from %d to %d",
965                      frameCount, minFrameCount);
966             frameCount = minFrameCount;
967         }
968         // Make sure that application is notified with sufficient margin before underrun
969         if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
970             mNotificationFramesAct = frameCount/nBuffering;
971         }
972 
973     } else {
974         // For fast tracks, the frame count calculations and checks are done by server
975     }
976 
977     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
978     if (mIsTimed) {
979         trackFlags |= IAudioFlinger::TRACK_TIMED;
980     }
981 
982     pid_t tid = -1;
983     if (flags & AUDIO_OUTPUT_FLAG_FAST) {
984         trackFlags |= IAudioFlinger::TRACK_FAST;
985         if (mAudioTrackThread != 0) {
986             tid = mAudioTrackThread->getTid();
987         }
988     }
989 
990     if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
991         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
992     }
993 
994     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
995                                                       sampleRate,
996                                                       // AudioFlinger only sees 16-bit PCM
997                                                       format == AUDIO_FORMAT_PCM_8_BIT ?
998                                                               AUDIO_FORMAT_PCM_16_BIT : format,
999                                                       mChannelMask,
1000                                                       frameCount,
1001                                                       &trackFlags,
1002                                                       sharedBuffer,
1003                                                       output,
1004                                                       tid,
1005                                                       &mSessionId,
1006                                                       mName,
1007                                                       mClientUid,
1008                                                       &status);
1009 
1010     if (track == 0) {
1011         ALOGE("AudioFlinger could not create track, status: %d", status);
1012         return status;
1013     }
1014     sp<IMemory> iMem = track->getCblk();
1015     if (iMem == 0) {
1016         ALOGE("Could not get control block");
1017         return NO_INIT;
1018     }
1019     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1020     if (mAudioTrack != 0) {
1021         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1022         mDeathNotifier.clear();
1023     }
1024     mAudioTrack = track;
1025     mCblkMemory = iMem;
1026     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
1027     mCblk = cblk;
1028     size_t temp = cblk->frameCount_;
1029     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1030         // In current design, AudioTrack client checks and ensures frame count validity before
1031         // passing it to AudioFlinger so AudioFlinger should not return a different value except
1032         // for fast track as it uses a special method of assigning frame count.
1033         ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1034     }
1035     frameCount = temp;
1036     mAwaitBoost = false;
1037     if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1038         if (trackFlags & IAudioFlinger::TRACK_FAST) {
1039             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1040             mAwaitBoost = true;
1041             if (sharedBuffer == 0) {
1042                 // Theoretically double-buffering is not required for fast tracks,
1043                 // due to tighter scheduling.  But in practice, to accommodate kernels with
1044                 // scheduling jitter, and apps with computation jitter, we use double-buffering.
1045                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1046                     mNotificationFramesAct = frameCount/nBuffering;
1047                 }
1048             }
1049         } else {
1050             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1051             // once denied, do not request again if IAudioTrack is re-created
1052             flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1053             mFlags = flags;
1054             if (sharedBuffer == 0) {
1055                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1056                     mNotificationFramesAct = frameCount/nBuffering;
1057                 }
1058             }
1059         }
1060     }
1061     if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1062         if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1063             ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1064         } else {
1065             ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1066             flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1067             mFlags = flags;
1068             return NO_INIT;
1069         }
1070     }
1071 
1072     mRefreshRemaining = true;
1073 
1074     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1075     // is the value of pointer() for the shared buffer, otherwise buffers points
1076     // immediately after the control block.  This address is for the mapping within client
1077     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1078     void* buffers;
1079     if (sharedBuffer == 0) {
1080         buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1081     } else {
1082         buffers = sharedBuffer->pointer();
1083     }
1084 
1085     mAudioTrack->attachAuxEffect(mAuxEffectId);
1086     // FIXME don't believe this lie
1087     mLatency = afLatency + (1000*frameCount) / sampleRate;
1088     mFrameCount = frameCount;
1089     // If IAudioTrack is re-created, don't let the requested frameCount
1090     // decrease.  This can confuse clients that cache frameCount().
1091     if (frameCount > mReqFrameCount) {
1092         mReqFrameCount = frameCount;
1093     }
1094 
1095     // update proxy
1096     if (sharedBuffer == 0) {
1097         mStaticProxy.clear();
1098         mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1099     } else {
1100         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1101         mProxy = mStaticProxy;
1102     }
1103     mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1104             uint16_t(mVolume[LEFT] * 0x1000));
1105     mProxy->setSendLevel(mSendLevel);
1106     mProxy->setSampleRate(mSampleRate);
1107     mProxy->setEpoch(epoch);
1108     mProxy->setMinimum(mNotificationFramesAct);
1109 
1110     mDeathNotifier = new DeathNotifier(this);
1111     mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1112 
1113     return NO_ERROR;
1114 }
1115 
1116 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1117 {
1118     if (audioBuffer == NULL) {
1119         return BAD_VALUE;
1120     }
1121     if (mTransfer != TRANSFER_OBTAIN) {
1122         audioBuffer->frameCount = 0;
1123         audioBuffer->size = 0;
1124         audioBuffer->raw = NULL;
1125         return INVALID_OPERATION;
1126     }
1127 
1128     const struct timespec *requested;
1129     struct timespec timeout;
1130     if (waitCount == -1) {
1131         requested = &ClientProxy::kForever;
1132     } else if (waitCount == 0) {
1133         requested = &ClientProxy::kNonBlocking;
1134     } else if (waitCount > 0) {
1135         long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1136         timeout.tv_sec = ms / 1000;
1137         timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1138         requested = &timeout;
1139     } else {
1140         ALOGE("%s invalid waitCount %d", __func__, waitCount);
1141         requested = NULL;
1142     }
1143     return obtainBuffer(audioBuffer, requested);
1144 }
1145 
1146 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1147         struct timespec *elapsed, size_t *nonContig)
1148 {
1149     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1150     uint32_t oldSequence = 0;
1151     uint32_t newSequence;
1152 
1153     Proxy::Buffer buffer;
1154     status_t status = NO_ERROR;
1155 
1156     static const int32_t kMaxTries = 5;
1157     int32_t tryCounter = kMaxTries;
1158 
1159     do {
1160         // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1161         // keep them from going away if another thread re-creates the track during obtainBuffer()
1162         sp<AudioTrackClientProxy> proxy;
1163         sp<IMemory> iMem;
1164 
1165         {   // start of lock scope
1166             AutoMutex lock(mLock);
1167 
1168             newSequence = mSequence;
1169             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1170             if (status == DEAD_OBJECT) {
1171                 // re-create track, unless someone else has already done so
1172                 if (newSequence == oldSequence) {
1173                     status = restoreTrack_l("obtainBuffer");
1174                     if (status != NO_ERROR) {
1175                         buffer.mFrameCount = 0;
1176                         buffer.mRaw = NULL;
1177                         buffer.mNonContig = 0;
1178                         break;
1179                     }
1180                 }
1181             }
1182             oldSequence = newSequence;
1183 
1184             // Keep the extra references
1185             proxy = mProxy;
1186             iMem = mCblkMemory;
1187 
1188             if (mState == STATE_STOPPING) {
1189                 status = -EINTR;
1190                 buffer.mFrameCount = 0;
1191                 buffer.mRaw = NULL;
1192                 buffer.mNonContig = 0;
1193                 break;
1194             }
1195 
1196             // Non-blocking if track is stopped or paused
1197             if (mState != STATE_ACTIVE) {
1198                 requested = &ClientProxy::kNonBlocking;
1199             }
1200 
1201         }   // end of lock scope
1202 
1203         buffer.mFrameCount = audioBuffer->frameCount;
1204         // FIXME starts the requested timeout and elapsed over from scratch
1205         status = proxy->obtainBuffer(&buffer, requested, elapsed);
1206 
1207     } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1208 
1209     audioBuffer->frameCount = buffer.mFrameCount;
1210     audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1211     audioBuffer->raw = buffer.mRaw;
1212     if (nonContig != NULL) {
1213         *nonContig = buffer.mNonContig;
1214     }
1215     return status;
1216 }
1217 
1218 void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1219 {
1220     if (mTransfer == TRANSFER_SHARED) {
1221         return;
1222     }
1223 
1224     size_t stepCount = audioBuffer->size / mFrameSizeAF;
1225     if (stepCount == 0) {
1226         return;
1227     }
1228 
1229     Proxy::Buffer buffer;
1230     buffer.mFrameCount = stepCount;
1231     buffer.mRaw = audioBuffer->raw;
1232 
1233     AutoMutex lock(mLock);
1234     mInUnderrun = false;
1235     mProxy->releaseBuffer(&buffer);
1236 
1237     // restart track if it was disabled by audioflinger due to previous underrun
1238     if (mState == STATE_ACTIVE) {
1239         audio_track_cblk_t* cblk = mCblk;
1240         if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1241             ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1242                     this, mName.string());
1243             // FIXME ignoring status
1244             mAudioTrack->start();
1245         }
1246     }
1247 }
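// [Editorial note, not part of the original source] Sketch of the TRANSFER_OBTAIN
// pattern served by obtainBuffer()/releaseBuffer() above; a waitCount of -1 blocks
// until buffer space is available, and fewer frames than requested may be returned.
// framesToWrite and source are placeholder names.
//
//     AudioTrack::Buffer buffer;
//     buffer.frameCount = framesToWrite;
//     if (track->obtainBuffer(&buffer, -1 /*waitCount*/) == NO_ERROR) {
//         memcpy(buffer.raw, source, buffer.size);   // fill exactly buffer.size bytes
//         track->releaseBuffer(&buffer);
//     }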
1248 
1249 // -------------------------------------------------------------------------
1250 
1251 ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1252 {
1253     if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1254         return INVALID_OPERATION;
1255     }
1256 
1257     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1258         // Sanity-check: user is most-likely passing an error code, and it would
1259         // make the return value ambiguous (actualSize vs error).
1260         ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
1261         return BAD_VALUE;
1262     }
1263 
1264     size_t written = 0;
1265     Buffer audioBuffer;
1266 
1267     while (userSize >= mFrameSize) {
1268         audioBuffer.frameCount = userSize / mFrameSize;
1269 
1270         status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1271         if (err < 0) {
1272             if (written > 0) {
1273                 break;
1274             }
1275             return ssize_t(err);
1276         }
1277 
1278         size_t toWrite;
1279         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1280             // Divide capacity by 2 to take expansion into account
1281             toWrite = audioBuffer.size >> 1;
1282             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1283         } else {
1284             toWrite = audioBuffer.size;
1285             memcpy(audioBuffer.i8, buffer, toWrite);
1286         }
1287         buffer = ((const char *) buffer) + toWrite;
1288         userSize -= toWrite;
1289         written += toWrite;
1290 
1291         releaseBuffer(&audioBuffer);
1292     }
1293 
1294     return written;
1295 }
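// [Editorial note, not part of the original source] Sketch of the TRANSFER_SYNC
// path handled by write() above; write() blocks until the data is queued or an
// error occurs, and returns the number of bytes actually written.
//
//     static const size_t kBufSize = 4096;
//     int16_t pcm[kBufSize / sizeof(int16_t)];
//     // ... fill pcm with interleaved 16-bit samples ...
//     ssize_t written = track->write(pcm, kBufSize);
//     if (written < 0) {
//         // error (e.g. INVALID_OPERATION if the track is not in TRANSFER_SYNC mode)
//     }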
1296 
1297 // -------------------------------------------------------------------------
1298 
1299 TimedAudioTrack::TimedAudioTrack() {
1300     mIsTimed = true;
1301 }
1302 
1303 status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1304 {
1305     AutoMutex lock(mLock);
1306     status_t result = UNKNOWN_ERROR;
1307 
1308 #if 1
1309     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1310     // while we are accessing the cblk
1311     sp<IAudioTrack> audioTrack = mAudioTrack;
1312     sp<IMemory> iMem = mCblkMemory;
1313 #endif
1314 
1315     // If the track is not invalid already, try to allocate a buffer.  If the alloc
1316     // fails, indicating that the server is dead, flag the track as invalid so that
1317     // we can attempt to restore it in just a bit.
1318     audio_track_cblk_t* cblk = mCblk;
1319     if (!(cblk->mFlags & CBLK_INVALID)) {
1320         result = mAudioTrack->allocateTimedBuffer(size, buffer);
1321         if (result == DEAD_OBJECT) {
1322             android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1323         }
1324     }
1325 
1326     // If the track is invalid at this point, attempt to restore it and try the
1327     // allocation one more time.
1328     if (cblk->mFlags & CBLK_INVALID) {
1329         result = restoreTrack_l("allocateTimedBuffer");
1330 
1331         if (result == NO_ERROR) {
1332             result = mAudioTrack->allocateTimedBuffer(size, buffer);
1333         }
1334     }
1335 
1336     return result;
1337 }
1338 
1339 status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1340                                            int64_t pts)
1341 {
1342     status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1343     {
1344         AutoMutex lock(mLock);
1345         audio_track_cblk_t* cblk = mCblk;
1346         // restart track if it was disabled by audioflinger due to previous underrun
1347         if (buffer->size() != 0 && status == NO_ERROR &&
1348                 (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1349             android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1350             ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1351             // FIXME ignoring status
1352             mAudioTrack->start();
1353         }
1354     }
1355     return status;
1356 }
1357 
1358 status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1359                                                 TargetTimeline target)
1360 {
1361     return mAudioTrack->setMediaTimeTransform(xform, target);
1362 }
1363 
1364 // -------------------------------------------------------------------------
1365 
1366 nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1367 {
1368     // Currently the AudioTrack thread is not created if there are no callbacks.
1369     // Would it ever make sense to run the thread, even without callbacks?
1370     // If so, then replace this by checks at each use for mCbf != NULL.
1371     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1372 
1373     mLock.lock();
1374     if (mAwaitBoost) {
1375         mAwaitBoost = false;
1376         mLock.unlock();
1377         static const int32_t kMaxTries = 5;
1378         int32_t tryCounter = kMaxTries;
1379         uint32_t pollUs = 10000;
1380         do {
1381             int policy = sched_getscheduler(0);
1382             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1383                 break;
1384             }
1385             usleep(pollUs);
1386             pollUs <<= 1;
1387         } while (tryCounter-- > 0);
1388         if (tryCounter < 0) {
1389             ALOGE("did not receive expected priority boost on time");
1390         }
1391         // Run again immediately
1392         return 0;
1393     }
1394 
1395     // Can only reference mCblk while locked
1396     int32_t flags = android_atomic_and(
1397         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1398 
1399     // Check for track invalidation
1400     if (flags & CBLK_INVALID) {
1401         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1402         // AudioSystem cache. We should not exit here but after calling the callback so
1403         // that the upper layers can recreate the track
1404         if (!isOffloaded() || (mSequence == mObservedSequence)) {
1405             status_t status = restoreTrack_l("processAudioBuffer");
1406             mLock.unlock();
1407             // Run again immediately, but with a new IAudioTrack
1408             return 0;
1409         }
1410     }
1411 
1412     bool waitStreamEnd = mState == STATE_STOPPING;
1413     bool active = mState == STATE_ACTIVE;
1414 
1415     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1416     bool newUnderrun = false;
1417     if (flags & CBLK_UNDERRUN) {
1418 #if 0
1419         // Currently in shared buffer mode, when the server reaches the end of buffer,
1420         // the track stays active in continuous underrun state.  It's up to the application
1421         // to pause or stop the track, or set the position to a new offset within buffer.
1422         // This was some experimental code to auto-pause on underrun.   Keeping it here
1423         // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1424         if (mTransfer == TRANSFER_SHARED) {
1425             mState = STATE_PAUSED;
1426             active = false;
1427         }
1428 #endif
1429         if (!mInUnderrun) {
1430             mInUnderrun = true;
1431             newUnderrun = true;
1432         }
1433     }
1434 
1435     // Get current position of server
1436     size_t position = mProxy->getPosition();
1437 
1438     // Manage marker callback
1439     bool markerReached = false;
1440     size_t markerPosition = mMarkerPosition;
1441     // FIXME fails for wraparound, need 64 bits
1442     if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1443         mMarkerReached = markerReached = true;
1444     }
1445 
1446     // Determine number of new position callback(s) that will be needed, while locked
1447     size_t newPosCount = 0;
1448     size_t newPosition = mNewPosition;
1449     size_t updatePeriod = mUpdatePeriod;
1450     // FIXME fails for wraparound, need 64 bits
1451     if (updatePeriod > 0 && position >= newPosition) {
1452         newPosCount = ((position - newPosition) / updatePeriod) + 1;
1453         mNewPosition += updatePeriod * newPosCount;
1454     }
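    // Example (illustrative values): with updatePeriod = 480 frames and the server 1500 frames
    // past newPosition, ((1500 / 480) + 1) = 4 EVENT_NEW_POS callbacks are due, and
    // mNewPosition advances by 4 * 480 frames so the next one fires a full period later.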
1455 
1456     // Cache other fields that will be needed soon
1457     uint32_t loopPeriod = mLoopPeriod;
1458     uint32_t sampleRate = mSampleRate;
1459     size_t notificationFrames = mNotificationFramesAct;
1460     if (mRefreshRemaining) {
1461         mRefreshRemaining = false;
1462         mRemainingFrames = notificationFrames;
1463         mRetryOnPartialBuffer = false;
1464     }
1465     size_t misalignment = mProxy->getMisalignment();
1466     uint32_t sequence = mSequence;
1467     sp<AudioTrackClientProxy> proxy = mProxy;
1468 
1469     // These fields don't need to be cached, because they are assigned only by set():
1470     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1471     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1472 
1473     mLock.unlock();
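    // Everything below runs without mLock held: the application callback may call back into
    // AudioTrack (stop(), setPosition(), ...), which would deadlock if the lock were still held
    // across mCbf.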
1474 
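    // STATE_STOPPING is normally entered when stop() is called on an offloaded track; here we
    // wait (up to WAIT_STREAM_END_TIMEOUT_SEC) for the server to report that the stream has
    // fully drained before delivering EVENT_STREAM_END.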
1475     if (waitStreamEnd) {
1476         struct timespec timeout;
1477         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1478         timeout.tv_nsec = 0;
1479 
1480         status_t status = proxy->waitStreamEndDone(&timeout);
1481         switch (status) {
1482         case NO_ERROR:
1483         case DEAD_OBJECT:
1484         case TIMED_OUT:
1485             mCbf(EVENT_STREAM_END, mUserData, NULL);
1486             {
1487                 AutoMutex lock(mLock);
1488                 // The previously assigned value of waitStreamEnd is no longer valid,
1489                 // since the mutex has been unlocked and either the callback handler
1490                 // or another thread could have re-started the AudioTrack during that time.
1491                 waitStreamEnd = mState == STATE_STOPPING;
1492                 if (waitStreamEnd) {
1493                     mState = STATE_STOPPED;
1494                 }
1495             }
1496             if (waitStreamEnd && status != DEAD_OBJECT) {
1497                return NS_INACTIVE;
1498             }
1499             break;
1500         }
1501         return 0;
1502     }
1503 
1504     // perform callbacks while unlocked
1505     if (newUnderrun) {
1506         mCbf(EVENT_UNDERRUN, mUserData, NULL);
1507     }
1508     // FIXME we will miss loops if loop cycle was signaled several times since last call
1509     //       to processAudioBuffer()
1510     if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1511         mCbf(EVENT_LOOP_END, mUserData, NULL);
1512     }
1513     if (flags & CBLK_BUFFER_END) {
1514         mCbf(EVENT_BUFFER_END, mUserData, NULL);
1515     }
1516     if (markerReached) {
1517         mCbf(EVENT_MARKER, mUserData, &markerPosition);
1518     }
1519     while (newPosCount > 0) {
1520         size_t temp = newPosition;
1521         mCbf(EVENT_NEW_POS, mUserData, &temp);
1522         newPosition += updatePeriod;
1523         newPosCount--;
1524     }
1525 
1526     if (mObservedSequence != sequence) {
1527         mObservedSequence = sequence;
1528         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1529         // for offloaded tracks, just wait for the upper layers to recreate the track
1530         if (isOffloaded()) {
1531             return NS_INACTIVE;
1532         }
1533     }
1534 
1535     // if inactive, then don't run me again until re-started
1536     if (!active) {
1537         return NS_INACTIVE;
1538     }
1539 
1540     // Compute the estimated time until the next timed event (position, markers, loops)
1541     // FIXME only for non-compressed audio
1542     uint32_t minFrames = ~0;
1543     if (!markerReached && position < markerPosition) {
1544         minFrames = markerPosition - position;
1545     }
1546     if (loopPeriod > 0 && loopPeriod < minFrames) {
1547         minFrames = loopPeriod;
1548     }
1549     if (updatePeriod > 0 && updatePeriod < minFrames) {
1550         minFrames = updatePeriod;
1551     }
1552 
1553     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1554     static const uint32_t kPoll = 0;
1555     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1556         minFrames = kPoll * notificationFrames;
1557     }
1558 
1559     // Convert frame units to time units
1560     nsecs_t ns = NS_WHENEVER;
1561     if (minFrames != (uint32_t) ~0) {
1562         // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1563         static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1564         ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1565     }
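    // Illustrative arithmetic: with minFrames = 480 and sampleRate = 48000, the sleep works out
    // to 480 * 1e9 / 48000 = 10 ms, plus the 10 ms fudge factor, i.e. about 20 ms.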
1566 
1567     // If not supplying data by EVENT_MORE_DATA, then we're done
1568     if (mTransfer != TRANSFER_CALLBACK) {
1569         return ns;
1570     }
1571 
1572     struct timespec timeout;
1573     const struct timespec *requested = &ClientProxy::kForever;
1574     if (ns != NS_WHENEVER) {
1575         timeout.tv_sec = ns / 1000000000LL;
1576         timeout.tv_nsec = ns % 1000000000LL;
1577         ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1578         requested = &timeout;
1579     }
1580 
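    // TRANSFER_CALLBACK supply loop: obtain a region of the shared buffer, let the application
    // fill it via EVENT_MORE_DATA, then release it back to the server; repeat until roughly
    // notificationFrames worth of frames have been supplied. Only the first obtainBuffer() may
    // block (up to the timeout computed above, or indefinitely if there is no timeout); later
    // iterations use kNonBlocking.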
1581     while (mRemainingFrames > 0) {
1582 
1583         Buffer audioBuffer;
1584         audioBuffer.frameCount = mRemainingFrames;
1585         size_t nonContig;
1586         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1587         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1588                 "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1589         requested = &ClientProxy::kNonBlocking;
1590         size_t avail = audioBuffer.frameCount + nonContig;
1591         ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1592                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1593         if (err != NO_ERROR) {
1594             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1595                     (isOffloaded() && (err == DEAD_OBJECT))) {
1596                 return 0;
1597             }
1598             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1599             return NS_NEVER;
1600         }
1601 
1602         if (mRetryOnPartialBuffer && !isOffloaded()) {
1603             mRetryOnPartialBuffer = false;
1604             if (avail < mRemainingFrames) {
1605                 int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1606                 if (ns < 0 || myns < ns) {
1607                     ns = myns;
1608                 }
1609                 return ns;
1610             }
1611         }
1612 
1613         // Divide buffer size by 2 to take into account the expansion
1614         // due to 8 to 16 bit conversion: the callback must fill only half
1615         // of the destination buffer
1616         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1617             audioBuffer.size >>= 1;
1618         }
1619 
1620         size_t reqSize = audioBuffer.size;
1621         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1622         size_t writtenSize = audioBuffer.size;
1623         size_t writtenFrames = writtenSize / mFrameSize;
1624 
1625         // Sanity check on returned size
1626         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1627             ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1628                     reqSize, (int) writtenSize);
1629             return NS_NEVER;
1630         }
1631 
1632         if (writtenSize == 0) {
1633             // The callback is done filling buffers
1634             // Keep this thread going to handle timed events and
1635             // still try to get more data in intervals of WAIT_PERIOD_MS
1636             // but don't just loop and block the CPU, so wait
1637             return WAIT_PERIOD_MS * 1000000LL;
1638         }
1639 
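        // The expansion below is done in place (src == dst), which presumably relies on
        // memcpy_to_i16_from_u8() walking the buffer from the end backwards so that the 16-bit
        // writes never clobber 8-bit samples that have not yet been read.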
1640         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1641             // 8 to 16 bit conversion, note that source and destination are the same address
1642             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1643             audioBuffer.size <<= 1;
1644         }
1645 
1646         size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1647         audioBuffer.frameCount = releasedFrames;
1648         mRemainingFrames -= releasedFrames;
1649         if (misalignment >= releasedFrames) {
1650             misalignment -= releasedFrames;
1651         } else {
1652             misalignment = 0;
1653         }
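        // With the heuristic below disabled (#if 0), this local misalignment bookkeeping has no
        // visible effect; it is simply kept in step with the frames actually released.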
1654 
1655         releaseBuffer(&audioBuffer);
1656 
1657         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
1658         // if callback doesn't like to accept the full chunk
1659         if (writtenSize < reqSize) {
1660             continue;
1661         }
1662 
1663         // There could be enough non-contiguous frames available to satisfy the remaining request
1664         if (mRemainingFrames <= nonContig) {
1665             continue;
1666         }
1667 
1668 #if 0
1669         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1670         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1671         // that total to a sum == notificationFrames.
1672         if (0 < misalignment && misalignment <= mRemainingFrames) {
1673             mRemainingFrames = misalignment;
1674             return (mRemainingFrames * 1100000000LL) / sampleRate;
1675         }
1676 #endif
1677 
1678     }
1679     mRemainingFrames = notificationFrames;
1680     mRetryOnPartialBuffer = true;
1681 
1682     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1683     return 0;
1684 }
1685 
1686 status_t AudioTrack::restoreTrack_l(const char *from)
1687 {
1688     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1689           isOffloaded() ? "Offloaded" : "PCM", from);
1690     ++mSequence;
1691     status_t result;
1692 
1693     // refresh the audio configuration cache in this process to make sure we get new
1694     // output parameters in getOutput_l() and createTrack_l()
1695     AudioSystem::clearAudioConfigCache();
1696 
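    // For offloaded tracks we don't rebuild the IAudioTrack here; DEAD_OBJECT is propagated so
    // that the upper layers are expected to recreate the track themselves (see the CBLK_INVALID
    // handling in processAudioBuffer()).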
1697     if (isOffloaded()) {
1698         return DEAD_OBJECT;
1699     }
1700 
1701     // force new output query from audio policy manager;
1702     mOutput = 0;
1703     audio_io_handle_t output = getOutput_l();
1704 
1705     // if the new IAudioTrack is created, createTrack_l() will modify the
1706     // following member variables: mAudioTrack, mCblkMemory and mCblk.
1707     // It will also delete the strong references on previous IAudioTrack and IMemory
1708 
1709     // take the frames that will be lost by track recreation into account in saved position
1710     size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1711     size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1712     result = createTrack_l(mStreamType,
1713                            mSampleRate,
1714                            mFormat,
1715                            mReqFrameCount,  // so that frame count never goes down
1716                            mFlags,
1717                            mSharedBuffer,
1718                            output,
1719                            position /*epoch*/);
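    // Passing the saved position as the new track's epoch is what keeps the client-visible
    // position continuous across recreation; getTimestamp(), for example, adds
    // mProxy->getEpoch() to whatever position the new server instance reports.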
1720 
1721     if (result == NO_ERROR) {
1722         // continue playback from last known position, but
1723         // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1724         if (mStaticProxy != NULL) {
1725             mLoopPeriod = 0;
1726             mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1727         }
1728         // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1729         //       track destruction have been played? This is critical for SoundPool implementation
1730         //       This must be broken, and needs to be tested/debugged.
1731 #if 0
1732         // restore write index and set other indexes to reflect empty buffer status
1733         if (!strcmp(from, "start")) {
1734             // Make sure that a client relying on callback events indicating underrun or
1735             // the actual amount of audio frames played (e.g SoundPool) receives them.
1736             if (mSharedBuffer == 0) {
1737                 // restart playback even if buffer is not completely filled.
1738                 android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1739             }
1740         }
1741 #endif
1742         if (mState == STATE_ACTIVE) {
1743             result = mAudioTrack->start();
1744         }
1745     }
1746     if (result != NO_ERROR) {
1747         // Use of direct and offloaded output streams is ref-counted by the audio policy
1748         // manager. Since getOutput() was called above and opened an output stream, we need
1749         // to release it here.
1750         AudioSystem::releaseOutput(output);
1751         ALOGW("restoreTrack_l() failed status %d", result);
1752         mState = STATE_STOPPED;
1753     }
1754 
1755     return result;
1756 }
1757 
1758 status_t AudioTrack::setParameters(const String8& keyValuePairs)
1759 {
1760     AutoMutex lock(mLock);
1761     return mAudioTrack->setParameters(keyValuePairs);
1762 }
1763 
1764 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1765 {
1766     AutoMutex lock(mLock);
1767     // FIXME not implemented for fast tracks; should use proxy and SSQ
1768     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1769         return INVALID_OPERATION;
1770     }
1771     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1772         return INVALID_OPERATION;
1773     }
1774     status_t status = mAudioTrack->getTimestamp(timestamp);
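    // The server-side timestamp position is relative to the current IAudioTrack, so the proxy
    // epoch (frames accounted to previous incarnations of the track) is added to express it
    // relative to what the application has observed.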
1775     if (status == NO_ERROR) {
1776         timestamp.mPosition += mProxy->getEpoch();
1777     }
1778     return status;
1779 }
1780 
1781 String8 AudioTrack::getParameters(const String8& keys)
1782 {
1783     if (mOutput) {
1784         return AudioSystem::getParameters(mOutput, keys);
1785     } else {
1786         return String8::empty();
1787     }
1788 }
1789 
1790 status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1791 {
1792 
1793     const size_t SIZE = 256;
1794     char buffer[SIZE];
1795     String8 result;
1796 
1797     result.append(" AudioTrack::dump\n");
1798     snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1799             mVolume[0], mVolume[1]);
1800     result.append(buffer);
1801     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1802             mChannelCount, mFrameCount);
1803     result.append(buffer);
1804     snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1805     result.append(buffer);
1806     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1807     result.append(buffer);
1808     ::write(fd, result.string(), result.size());
1809     return NO_ERROR;
1810 }
1811 
1812 uint32_t AudioTrack::getUnderrunFrames() const
1813 {
1814     AutoMutex lock(mLock);
1815     return mProxy->getUnderrunFrames();
1816 }
1817 
1818 // =========================================================================
1819 
1820 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1821 {
1822     sp<AudioTrack> audioTrack = mAudioTrack.promote();
1823     if (audioTrack != 0) {
1824         AutoMutex lock(audioTrack->mLock);
1825         audioTrack->mProxy->binderDied();
1826     }
1827 }
1828 
1829 // =========================================================================
1830 
1831 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1832     : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1833       mIgnoreNextPausedInt(false)
1834 {
1835 }
1836 
1837 AudioTrack::AudioTrackThread::~AudioTrackThread()
1838 {
1839 }
1840 
1841 bool AudioTrack::AudioTrackThread::threadLoop()
1842 {
1843     {
1844         AutoMutex _l(mMyLock);
1845         if (mPaused) {
1846             mMyCond.wait(mMyLock);
1847             // caller will check for exitPending()
1848             return true;
1849         }
1850         if (mIgnoreNextPausedInt) {
1851             mIgnoreNextPausedInt = false;
1852             mPausedInt = false;
1853         }
1854         if (mPausedInt) {
1855             if (mPausedNs > 0) {
1856                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1857             } else {
1858                 mMyCond.wait(mMyLock);
1859             }
1860             mPausedInt = false;
1861             return true;
1862         }
1863     }
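    // processAudioBuffer() returns a sleep hint: 0 means run again immediately, NS_INACTIVE
    // means pause until resume(), NS_NEVER exits the thread, NS_WHENEVER falls back to a 1 s
    // poll, and any other (positive) value is slept via pauseInternal(ns).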
1864     nsecs_t ns = mReceiver.processAudioBuffer(this);
1865     switch (ns) {
1866     case 0:
1867         return true;
1868     case NS_INACTIVE:
1869         pauseInternal();
1870         return true;
1871     case NS_NEVER:
1872         return false;
1873     case NS_WHENEVER:
1874         // FIXME increase poll interval, or make event-driven
1875         ns = 1000000000LL;
1876         // fall through
1877     default:
1878         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1879         pauseInternal(ns);
1880         return true;
1881     }
1882 }
1883 
1884 void AudioTrack::AudioTrackThread::requestExit()
1885 {
1886     // must be in this order to avoid a race condition
1887     Thread::requestExit();
1888     resume();
1889 }
1890 
1891 void AudioTrack::AudioTrackThread::pause()
1892 {
1893     AutoMutex _l(mMyLock);
1894     mPaused = true;
1895 }
1896 
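// resume() cancels both an external pause() and a pending internal timed pause; the
// mIgnoreNextPausedInt flag ensures that a pauseInternal() racing with resume() (e.g. around
// start()) is ignored, so the callback thread wakes up promptly.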
1897 void AudioTrack::AudioTrackThread::resume()
1898 {
1899     AutoMutex _l(mMyLock);
1900     mIgnoreNextPausedInt = true;
1901     if (mPaused || mPausedInt) {
1902         mPaused = false;
1903         mPausedInt = false;
1904         mMyCond.signal();
1905     }
1906 }
1907 
1908 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1909 {
1910     AutoMutex _l(mMyLock);
1911     mPausedInt = true;
1912     mPausedNs = ns;
1913 }
1914 
1915 }; // namespace android
1916