/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioResamplerPublic.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
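    // Illustrative (assumed) numbers: with afLatency = 96 ms, afFrameCount = 960 and
    // afSampleRate = 48000 Hz, each HAL buffer covers (1000 * 960) / 48000 = 20 ms,
    // so minBufCount = 96 / 20 = 4.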
    if (minBufCount < 2) {
        minBufCount = 2;
    }

    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        setAttributesFromStreamType(streamType);
        mStreamType = streamType;
    } else {
        if (!isValidAttributes(pAttributes)) {
            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
                pAttributes->tags);
        }
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        setStreamTypeFromAttributes(mAttributes);
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
    }

    status_t status;
    if (sampleRate == 0) {
        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
        if (status != NO_ERROR) {
            ALOGE("Could not get output sample rate for stream type %d; status %d",
                    mStreamType, status);
            return status;
        }
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (mStreamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
        mFrameSizeAF = mFrameSize;
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * audio_bytes_per_sample(
                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
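    // A caller-supplied uid/pid is honored only when set() is invoked from within this
    // process; for binder calls arriving from another process the binder calling
    // identity is used instead.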
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mServer = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
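    // Clear the underrun-induced disable flag; android_atomic_and() returns the previous
    // flags, which are checked below for CBLK_INVALID so that an invalidated track
    // (e.g. after mediaserver death) can be restored before starting.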

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
        return NO_INIT;
    }
    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloadedOrDirect_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l()
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
            mChannelMask, mFlags, mOffloadInfo);
    if (output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = audio_bytes_per_sample(
                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
        if (alignment & 1) {
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
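        // e.g. for 16-bit stereo PCM the shared buffer must start on a 4-byte boundary
        // (2 bytes per sample, doubled for multi-channel).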
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSizeAF;

    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
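        // Illustrative (assumed) numbers: afFrameCount = 960, minBufCount = 2 and
        // mSampleRate == afSampleRate give minFrameCount = 960 * 2 = 1920 frames.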
1043         ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1044                 ", afLatency=%d",
1045                 minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
1046 
1047         if (frameCount == 0) {
1048             frameCount = minFrameCount;
1049         } else if (frameCount < minFrameCount) {
1050             // not ALOGW because it happens all the time when playing key clicks over A2DP
1051             ALOGV("Minimum buffer size corrected from %zu to %zu",
1052                      frameCount, minFrameCount);
1053             frameCount = minFrameCount;
1054         }
1055         // Make sure that application is notified with sufficient margin before underrun
1056         if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1057             mNotificationFramesAct = frameCount/nBuffering;
1058         }
1059 
1060     } else {
1061         // For fast tracks, the frame count calculations and checks are done by server
1062     }
1063 
1064     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1065     if (mIsTimed) {
1066         trackFlags |= IAudioFlinger::TRACK_TIMED;
1067     }
1068 
1069     pid_t tid = -1;
1070     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1071         trackFlags |= IAudioFlinger::TRACK_FAST;
1072         if (mAudioTrackThread != 0) {
1073             tid = mAudioTrackThread->getTid();
1074         }
1075     }
1076 
1077     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1078         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1079     }
1080 
1081     if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1082         trackFlags |= IAudioFlinger::TRACK_DIRECT;
1083     }
1084 
1085     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1086                                 // but we will still need the original value also
1087     sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1088                                                       mSampleRate,
1089                                                       // AudioFlinger only sees 16-bit PCM
1090                                                       mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1091                                                           !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1092                                                               AUDIO_FORMAT_PCM_16_BIT : mFormat,
1093                                                       mChannelMask,
1094                                                       &temp,
1095                                                       &trackFlags,
1096                                                       mSharedBuffer,
1097                                                       output,
1098                                                       tid,
1099                                                       &mSessionId,
1100                                                       mClientUid,
1101                                                       &status);
1102 
1103     if (status != NO_ERROR) {
1104         ALOGE("AudioFlinger could not create track, status: %d", status);
1105         goto release;
1106     }
1107     ALOG_ASSERT(track != 0);
1108 
1109     // AudioFlinger now owns the reference to the I/O handle,
1110     // so we are no longer responsible for releasing it.
1111 
1112     sp<IMemory> iMem = track->getCblk();
1113     if (iMem == 0) {
1114         ALOGE("Could not get control block");
1115         return NO_INIT;
1116     }
1117     void *iMemPointer = iMem->pointer();
1118     if (iMemPointer == NULL) {
1119         ALOGE("Could not get control block pointer");
1120         return NO_INIT;
1121     }
1122     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1123     if (mAudioTrack != 0) {
1124         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1125         mDeathNotifier.clear();
1126     }
1127     mAudioTrack = track;
1128     mCblkMemory = iMem;
1129     IPCThreadState::self()->flushCommands();
1130 
1131     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1132     mCblk = cblk;
1133     // note that temp is the (possibly revised) value of frameCount
1134     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1135         // In current design, AudioTrack client checks and ensures frame count validity before
1136         // passing it to AudioFlinger so AudioFlinger should not return a different value except
1137         // for fast track as it uses a special method of assigning frame count.
1138         ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1139     }
1140     frameCount = temp;
1141 
1142     mAwaitBoost = false;
1143     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1144         if (trackFlags & IAudioFlinger::TRACK_FAST) {
1145             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1146             mAwaitBoost = true;
1147             if (mSharedBuffer == 0) {
1148                 // Theoretically double-buffering is not required for fast tracks,
1149                 // due to tighter scheduling.  But in practice, to accommodate kernels with
1150                 // scheduling jitter, and apps with computation jitter, we use double-buffering.
1151                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1152                     mNotificationFramesAct = frameCount/nBuffering;
1153                 }
1154             }
1155         } else {
1156             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1157             // once denied, do not request again if IAudioTrack is re-created
1158             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1159             if (mSharedBuffer == 0) {
1160                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1161                     mNotificationFramesAct = frameCount/nBuffering;
1162                 }
1163             }
1164         }
1165     }
1166     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1167         if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1168             ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1169         } else {
1170             ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1171             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1172             // FIXME This is a warning, not an error, so don't return error status
1173             //return NO_INIT;
1174         }
1175     }
1176     if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1177         if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1178             ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1179         } else {
1180             ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1181             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1182             // FIXME This is a warning, not an error, so don't return error status
1183             //return NO_INIT;
1184         }
1185     }
1186 
1187     // We retain a copy of the I/O handle, but don't own the reference
1188     mOutput = output;
1189     mRefreshRemaining = true;
1190 
1191     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1192     // is the value of pointer() for the shared buffer, otherwise buffers points
1193     // immediately after the control block.  This address is for the mapping within client
1194     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1195     void* buffers;
1196     if (mSharedBuffer == 0) {
1197         buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1198     } else {
1199         buffers = mSharedBuffer->pointer();
1200     }
1201 
1202     mAudioTrack->attachAuxEffect(mAuxEffectId);
1203     // FIXME don't believe this lie
1204     mLatency = afLatency + (1000*frameCount) / mSampleRate;
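    // i.e. HAL/mixer latency plus the duration of the whole client buffer in ms;
    // e.g. (illustrative) 3840 frames at 48 kHz add (1000 * 3840) / 48000 = 80 ms.
    // Per the FIXME above, treat this only as a rough estimate.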

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
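        // waitCount is expressed in units of WAIT_PERIOD_MS (10 ms); e.g. waitCount = 3
        // yields a 30 ms timeout, stored as tv_sec = 0 and tv_nsec = 30000000.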
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mReleased += stepCount;
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
                            &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
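        // In both branches toWrite counts the client-side bytes consumed from 'buffer',
        // so the pointer and size bookkeeping below is the same for both formats.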
1422         buffer = ((const char *) buffer) + toWrite;
1423         userSize -= toWrite;
1424         written += toWrite;
1425 
1426         releaseBuffer(&audioBuffer);
1427     }
1428 
1429     return written;
1430 }
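// Editor's note (hedged usage sketch, not part of the original source): a
// TRANSFER_SYNC client typically streams PCM by looping on write(); with
// blocking == true the call only returns early on an error, so a short count
// signals either an error after partial progress or, in non-blocking mode,
// a full buffer:
//
//     ssize_t n = track->write(pcmData, pcmBytes, true /*blocking*/);
//     if (n < 0) {
//         // e.g. DEAD_OBJECT after mediaserver death, or BAD_VALUE
//     } else if ((size_t) n < pcmBytes) {
//         // partial write: retry the remainder later
//     }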
1431 
1432 // -------------------------------------------------------------------------
1433 
1434 TimedAudioTrack::TimedAudioTrack() {
1435     mIsTimed = true;
1436 }
1437 
1438 status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1439 {
1440     AutoMutex lock(mLock);
1441     status_t result = UNKNOWN_ERROR;
1442 
1443 #if 1
1444     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1445     // while we are accessing the cblk
1446     sp<IAudioTrack> audioTrack = mAudioTrack;
1447     sp<IMemory> iMem = mCblkMemory;
1448 #endif
1449 
1450     // If the track is not already invalid, try to allocate a buffer.  If the
1451     // allocation fails, indicating that the server is dead, flag the track as
1452     // invalid so we can attempt to restore it in just a bit.
1453     audio_track_cblk_t* cblk = mCblk;
1454     if (!(cblk->mFlags & CBLK_INVALID)) {
1455         result = mAudioTrack->allocateTimedBuffer(size, buffer);
1456         if (result == DEAD_OBJECT) {
1457             android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1458         }
1459     }
1460 
1461     // If the track is invalid at this point, attempt to restore it and try the
1462     // allocation one more time.
1463     if (cblk->mFlags & CBLK_INVALID) {
1464         result = restoreTrack_l("allocateTimedBuffer");
1465 
1466         if (result == NO_ERROR) {
1467             result = mAudioTrack->allocateTimedBuffer(size, buffer);
1468         }
1469     }
1470 
1471     return result;
1472 }
1473 
1474 status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1475                                            int64_t pts)
1476 {
1477     status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1478     {
1479         AutoMutex lock(mLock);
1480         audio_track_cblk_t* cblk = mCblk;
1481         // restart track if it was disabled by audioflinger due to previous underrun
1482         if (buffer->size() != 0 && status == NO_ERROR &&
1483                 (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1484             android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1485             ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1486             // FIXME ignoring status
1487             mAudioTrack->start();
1488         }
1489     }
1490     return status;
1491 }
1492 
1493 status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1494                                                 TargetTimeline target)
1495 {
1496     return mAudioTrack->setMediaTimeTransform(xform, target);
1497 }
1498 
1499 // -------------------------------------------------------------------------
1500 
1501 nsecs_t AudioTrack::processAudioBuffer()
1502 {
1503     // Currently the AudioTrack thread is not created if there are no callbacks.
1504     // Would it ever make sense to run the thread, even without callbacks?
1505     // If so, then replace this by checks at each use for mCbf != NULL.
1506     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1507 
1508     mLock.lock();
1509     if (mAwaitBoost) {
1510         mAwaitBoost = false;
1511         mLock.unlock();
1512         static const int32_t kMaxTries = 5;
1513         int32_t tryCounter = kMaxTries;
1514         uint32_t pollUs = 10000;
1515         do {
1516             int policy = sched_getscheduler(0);
1517             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1518                 break;
1519             }
1520             usleep(pollUs);
1521             pollUs <<= 1;
1522         } while (tryCounter-- > 0);
1523         if (tryCounter < 0) {
1524             ALOGE("did not receive expected priority boost on time");
1525         }
1526         // Run again immediately
1527         return 0;
1528     }
1529 
1530     // Can only reference mCblk while locked
1531     int32_t flags = android_atomic_and(
1532         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1533 
1534     // Check for track invalidation
1535     if (flags & CBLK_INVALID) {
1536         // For offloaded tracks, restoreTrack_l() will just update the sequence and clear
1537         // the AudioSystem cache. We should not exit here but after calling the callback, so
1538         // that the upper layers can recreate the track.
1539         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1540             status_t status = restoreTrack_l("processAudioBuffer");
1541             mLock.unlock();
1542             // Run again immediately, but with a new IAudioTrack
1543             return 0;
1544         }
1545     }
1546 
1547     bool waitStreamEnd = mState == STATE_STOPPING;
1548     bool active = mState == STATE_ACTIVE;
1549 
1550     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1551     bool newUnderrun = false;
1552     if (flags & CBLK_UNDERRUN) {
1553 #if 0
1554         // Currently in shared buffer mode, when the server reaches the end of the buffer,
1555         // the track stays active in a continuous underrun state.  It's up to the application
1556         // to pause or stop the track, or to set the position to a new offset within the buffer.
1557         // This was some experimental code to auto-pause on underrun.  Keeping it here
1558         // in "#if 0" so we can revisit it if we add a real sequencer for shared memory content.
1559         if (mTransfer == TRANSFER_SHARED) {
1560             mState = STATE_PAUSED;
1561             active = false;
1562         }
1563 #endif
1564         if (!mInUnderrun) {
1565             mInUnderrun = true;
1566             newUnderrun = true;
1567         }
1568     }
1569 
1570     // Get current position of server
1571     size_t position = updateAndGetPosition_l();
1572 
1573     // Manage marker callback
1574     bool markerReached = false;
1575     size_t markerPosition = mMarkerPosition;
1576     // FIXME fails for wraparound, need 64 bits
1577     if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1578         mMarkerReached = markerReached = true;
1579     }
1580 
1581     // Determine number of new position callback(s) that will be needed, while locked
1582     size_t newPosCount = 0;
1583     size_t newPosition = mNewPosition;
1584     size_t updatePeriod = mUpdatePeriod;
1585     // FIXME fails for wraparound, need 64 bits
1586     if (updatePeriod > 0 && position >= newPosition) {
1587         newPosCount = ((position - newPosition) / updatePeriod) + 1;
1588         mNewPosition += updatePeriod * newPosCount;
1589     }
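    // Worked example of the computation above (illustrative): with
    // updatePeriod == 1000 frames, mNewPosition == 5000 and position == 7200,
    // newPosCount == ((7200 - 5000) / 1000) + 1 == 3, so three EVENT_NEW_POS
    // callbacks are delivered below and mNewPosition advances to 8000.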
1590 
1591     // Cache other fields that will be needed soon
1592     uint32_t loopPeriod = mLoopPeriod;
1593     uint32_t sampleRate = mSampleRate;
1594     uint32_t notificationFrames = mNotificationFramesAct;
1595     if (mRefreshRemaining) {
1596         mRefreshRemaining = false;
1597         mRemainingFrames = notificationFrames;
1598         mRetryOnPartialBuffer = false;
1599     }
1600     size_t misalignment = mProxy->getMisalignment();
1601     uint32_t sequence = mSequence;
1602     sp<AudioTrackClientProxy> proxy = mProxy;
1603 
1604     // These fields don't need to be cached, because they are assigned only by set():
1605     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1606     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1607 
1608     mLock.unlock();
1609 
1610     if (waitStreamEnd) {
1611         struct timespec timeout;
1612         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1613         timeout.tv_nsec = 0;
1614 
1615         status_t status = proxy->waitStreamEndDone(&timeout);
1616         switch (status) {
1617         case NO_ERROR:
1618         case DEAD_OBJECT:
1619         case TIMED_OUT:
1620             mCbf(EVENT_STREAM_END, mUserData, NULL);
1621             {
1622                 AutoMutex lock(mLock);
1623                 // The previously assigned value of waitStreamEnd is no longer valid,
1624                 // since the mutex has been unlocked and either the callback handler
1625                 // or another thread could have re-started the AudioTrack during that time.
1626                 waitStreamEnd = mState == STATE_STOPPING;
1627                 if (waitStreamEnd) {
1628                     mState = STATE_STOPPED;
1629                     mReleased = 0;
1630                 }
1631             }
1632             if (waitStreamEnd && status != DEAD_OBJECT) {
1633                return NS_INACTIVE;
1634             }
1635             break;
1636         }
1637         return 0;
1638     }
1639 
1640     // perform callbacks while unlocked
1641     if (newUnderrun) {
1642         mCbf(EVENT_UNDERRUN, mUserData, NULL);
1643     }
1644     // FIXME we will miss loops if the loop cycle was signaled several times since the last
1645     //       call to processAudioBuffer()
1646     if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1647         mCbf(EVENT_LOOP_END, mUserData, NULL);
1648     }
1649     if (flags & CBLK_BUFFER_END) {
1650         mCbf(EVENT_BUFFER_END, mUserData, NULL);
1651     }
1652     if (markerReached) {
1653         mCbf(EVENT_MARKER, mUserData, &markerPosition);
1654     }
1655     while (newPosCount > 0) {
1656         size_t temp = newPosition;
1657         mCbf(EVENT_NEW_POS, mUserData, &temp);
1658         newPosition += updatePeriod;
1659         newPosCount--;
1660     }
1661 
1662     if (mObservedSequence != sequence) {
1663         mObservedSequence = sequence;
1664         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1665         // for offloaded tracks, just wait for the upper layers to recreate the track
1666         if (isOffloadedOrDirect()) {
1667             return NS_INACTIVE;
1668         }
1669     }
1670 
1671     // if inactive, then don't run me again until re-started
1672     if (!active) {
1673         return NS_INACTIVE;
1674     }
1675 
1676     // Compute the estimated time until the next timed event (position, markers, loops)
1677     // FIXME only for non-compressed audio
1678     uint32_t minFrames = ~0;
1679     if (!markerReached && position < markerPosition) {
1680         minFrames = markerPosition - position;
1681     }
1682     if (loopPeriod > 0 && loopPeriod < minFrames) {
1683         minFrames = loopPeriod;
1684     }
1685     if (updatePeriod > 0 && updatePeriod < minFrames) {
1686         minFrames = updatePeriod;
1687     }
1688 
1689     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1690     static const uint32_t kPoll = 0;
1691     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1692         minFrames = kPoll * notificationFrames;
1693     }
1694 
1695     // Convert frame units to time units
1696     nsecs_t ns = NS_WHENEVER;
1697     if (minFrames != (uint32_t) ~0) {
1698         // This "fudge factor" avoids soaking the CPU and compensates for late progress by the server.
1699         static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1700         ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1701     }
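    // Worked example (illustrative): with minFrames == 480 and sampleRate == 48000,
    // ns == 480 * 1e9 / 48000 + kFudgeNs == 10 ms + 10 ms == 20 ms until the next wakeup.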
1702 
1703     // If not supplying data by EVENT_MORE_DATA, then we're done
1704     if (mTransfer != TRANSFER_CALLBACK) {
1705         return ns;
1706     }
1707 
1708     struct timespec timeout;
1709     const struct timespec *requested = &ClientProxy::kForever;
1710     if (ns != NS_WHENEVER) {
1711         timeout.tv_sec = ns / 1000000000LL;
1712         timeout.tv_nsec = ns % 1000000000LL;
1713         ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1714         requested = &timeout;
1715     }
1716 
1717     while (mRemainingFrames > 0) {
1718 
1719         Buffer audioBuffer;
1720         audioBuffer.frameCount = mRemainingFrames;
1721         size_t nonContig;
1722         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1723         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1724                 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1725         requested = &ClientProxy::kNonBlocking;
1726         size_t avail = audioBuffer.frameCount + nonContig;
1727         ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1728                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1729         if (err != NO_ERROR) {
1730             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1731                     (isOffloaded() && (err == DEAD_OBJECT))) {
1732                 return 0;
1733             }
1734             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1735             return NS_NEVER;
1736         }
1737 
1738         if (mRetryOnPartialBuffer && !isOffloaded()) {
1739             mRetryOnPartialBuffer = false;
1740             if (avail < mRemainingFrames) {
1741                 int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1742                 if (ns < 0 || myns < ns) {
1743                     ns = myns;
1744                 }
1745                 return ns;
1746             }
1747         }
1748 
1749         // Divide buffer size by 2 to take into account the expansion
1750         // due to 8 to 16 bit conversion: the callback must fill only half
1751         // of the destination buffer
1752         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1753             audioBuffer.size >>= 1;
1754         }
1755 
1756         size_t reqSize = audioBuffer.size;
1757         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1758         size_t writtenSize = audioBuffer.size;
1759 
1760         // Sanity check on returned size
1761         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1762             ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1763                     reqSize, ssize_t(writtenSize));
1764             return NS_NEVER;
1765         }
1766 
1767         if (writtenSize == 0) {
1768             // The callback is done filling buffers.
1769             // Keep this thread going to handle timed events and
1770             // still try to get more data at intervals of WAIT_PERIOD_MS,
1771             // but don't just spin and hog the CPU, so wait.
1772             return WAIT_PERIOD_MS * 1000000LL;
1773         }
1774 
1775         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1776             // 8 to 16 bit conversion, note that source and destination are the same address
1777             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1778             audioBuffer.size <<= 1;
1779         }
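        // Illustrative bookkeeping note: if the callback wrote writtenSize == 256 bytes
        // of 8-bit samples, the in-place expansion above produces 512 bytes of 16-bit
        // samples, so audioBuffer.size doubles and releasedFrames below is computed
        // from the expanded (AudioFlinger-format) size.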
1780 
1781         size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1782         audioBuffer.frameCount = releasedFrames;
1783         mRemainingFrames -= releasedFrames;
1784         if (misalignment >= releasedFrames) {
1785             misalignment -= releasedFrames;
1786         } else {
1787             misalignment = 0;
1788         }
1789 
1790         releaseBuffer(&audioBuffer);
1791 
1792         // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1793         // if the callback does not accept the full chunk
1794         if (writtenSize < reqSize) {
1795             continue;
1796         }
1797 
1798         // There could be enough non-contiguous frames available to satisfy the remaining request
1799         if (mRemainingFrames <= nonContig) {
1800             continue;
1801         }
1802 
1803 #if 0
1804         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1805         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1806         // that total to a sum == notificationFrames.
1807         if (0 < misalignment && misalignment <= mRemainingFrames) {
1808             mRemainingFrames = misalignment;
1809             return (mRemainingFrames * 1100000000LL) / sampleRate;
1810         }
1811 #endif
1812 
1813     }
1814     mRemainingFrames = notificationFrames;
1815     mRetryOnPartialBuffer = true;
1816 
1817     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1818     return 0;
1819 }
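// Return-value contract of processAudioBuffer(), as consumed by
// AudioTrackThread::threadLoop() below: 0 requests an immediate re-run,
// NS_INACTIVE pauses the callback thread until the track is restarted,
// NS_NEVER terminates the thread, NS_WHENEVER falls back to a default poll
// interval, and any other positive value is a sleep time in nanoseconds.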
1820 
1821 status_t AudioTrack::restoreTrack_l(const char *from)
1822 {
1823     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1824           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1825     ++mSequence;
1826     status_t result;
1827 
1828     // refresh the audio configuration cache in this process to make sure we get new
1829     // output parameters in createTrack_l()
1830     AudioSystem::clearAudioConfigCache();
1831 
1832     if (isOffloadedOrDirect_l()) {
1833         // FIXME re-creation of offloaded tracks is not yet implemented
1834         return DEAD_OBJECT;
1835     }
1836 
1837     // save the old static buffer position
1838     size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1839 
1840     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1841     // following member variables: mAudioTrack, mCblkMemory and mCblk.
1842     // It will also delete the strong references on previous IAudioTrack and IMemory.
1843     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1844     result = createTrack_l();
1845 
1846     // take the frames that will be lost by track recreation into account in saved position
1847     (void) updateAndGetPosition_l();
1848     mPosition = mReleased;
1849 
1850     if (result == NO_ERROR) {
1851         // continue playback from last known position, but
1852         // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1853         if (mStaticProxy != NULL) {
1854             mLoopPeriod = 0;
1855             mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1856         }
1857         // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1858         //       track destruction have been played? This is critical for the SoundPool implementation.
1859         //       This must be broken, and needs to be tested/debugged.
1860 #if 0
1861         // restore write index and set other indexes to reflect empty buffer status
1862         if (!strcmp(from, "start")) {
1863             // Make sure that a client relying on callback events indicating underrun or
1864             // the actual number of audio frames played (e.g. SoundPool) receives them.
1865             if (mSharedBuffer == 0) {
1866                 // restart playback even if buffer is not completely filled.
1867                 android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1868             }
1869         }
1870 #endif
1871         if (mState == STATE_ACTIVE) {
1872             result = mAudioTrack->start();
1873         }
1874     }
1875     if (result != NO_ERROR) {
1876         ALOGW("restoreTrack_l() failed status %d", result);
1877         mState = STATE_STOPPED;
1878         mReleased = 0;
1879     }
1880 
1881     return result;
1882 }
1883 
1884 uint32_t AudioTrack::updateAndGetPosition_l()
1885 {
1886     // This is the sole place to read server consumed frames
1887     uint32_t newServer = mProxy->getPosition();
1888     int32_t delta = newServer - mServer;
1889     mServer = newServer;
1890     // TODO There is controversy about whether there can be "negative jitter" in server position.
1891     //      This should be investigated further, and if possible, it should be addressed.
1892     //      A more definite failure mode is infrequent polling by client.
1893     //      One could call (void)getPosition_l() in releaseBuffer(),
1894     //      so that mReleased and mPosition stay in lock-step as closely as possible.
1895     //      That should ensure delta never goes negative for infrequent polling
1896     //      unless the server has more than 2^31 frames in its buffer,
1897     //      in which case the use of uint32_t for these counters has bigger issues.
1898     if (delta < 0) {
1899         ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1900         delta = 0;
1901     }
1902     return mPosition += (uint32_t) delta;
1903 }
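// Wraparound note (illustrative): newServer and mServer are uint32_t, so the
// subtraction above is well defined modulo 2^32.  For example, if mServer is
// 0xFFFFFFF0 and newServer is 0x00000010, the unsigned difference is 0x20 and
// the cast to int32_t still yields the expected small positive delta of 32 frames.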
1904 
1905 status_t AudioTrack::setParameters(const String8& keyValuePairs)
1906 {
1907     AutoMutex lock(mLock);
1908     return mAudioTrack->setParameters(keyValuePairs);
1909 }
1910 
1911 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1912 {
1913     AutoMutex lock(mLock);
1914     // FIXME not implemented for fast tracks; should use proxy and SSQ
1915     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1916         return INVALID_OPERATION;
1917     }
1918 
1919     switch (mState) {
1920     case STATE_ACTIVE:
1921     case STATE_PAUSED:
1922         break; // handle below
1923     case STATE_FLUSHED:
1924     case STATE_STOPPED:
1925         return WOULD_BLOCK;
1926     case STATE_STOPPING:
1927     case STATE_PAUSED_STOPPING:
1928         if (!isOffloaded_l()) {
1929             return INVALID_OPERATION;
1930         }
1931         break; // offloaded tracks handled below
1932     default:
1933         LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1934         break;
1935     }
1936 
1937     // The presented frame count must always lag behind the consumed frame count.
1938     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1939     status_t status = mAudioTrack->getTimestamp(timestamp);
1940     if (status != NO_ERROR) {
1941         ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1942         return status;
1943     }
1944     if (isOffloadedOrDirect_l()) {
1945         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1946             // use cached paused position in case another offloaded track is running.
1947             timestamp.mPosition = mPausedPosition;
1948             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1949             return NO_ERROR;
1950         }
1951 
1952         // Check whether a pending flush or stop has completed, as those commands may
1953         // be asynchronous or may return slightly before they have fully finished.
1954         if (mStartUs != 0 && mSampleRate != 0) {
1955             static const int kTimeJitterUs = 100000; // 100 ms
1956             static const int k1SecUs = 1000000;
1957 
1958             const int64_t timeNow = getNowUs();
1959 
1960             if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1961                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1962                 if (timestampTimeUs < mStartUs) {
1963                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1964                 }
1965                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1966                 const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1967 
1968                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1969                     // Verify that the counter can't count faster than the sample rate
1970                     // since the start time.  If greater, then that means we have failed
1971                     // to completely flush or stop the previous playing track.
1972                     ALOGW("incomplete flush or stop:"
1973                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1974                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
1975                             timestamp.mPosition);
1976                     return WOULD_BLOCK;
1977                 }
1978             }
1979             mStartUs = 0; // no need to check again; the start timestamp has either expired or is no longer needed.
1980         }
1981     } else {
1982         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
1983         (void) updateAndGetPosition_l();
1984         // Server consumed (mServer) and presented both use the same server time base,
1985         // and server consumed is always >= presented.
1986         // The delta between these represents the number of frames in the buffer pipeline.
1987         // If this delta is greater than the client position, it means that the presented
1988         // position is still stuck at the starting line (figuratively speaking),
1989         // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
1990         if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
1991             return INVALID_OPERATION;
1992         }
1993         // Convert timestamp position from server time base to client time base.
1994         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
1995         // But if we change it to 64-bit then this could fail.
1996         // If (mPosition - mServer) can be negative then should use:
1997         //   (int32_t)(mPosition - mServer)
1998         timestamp.mPosition += mPosition - mServer;
1999         // Immediately after a call to getPosition_l(), mPosition and
2000         // mServer both represent the same frame position.  mPosition is
2001         // in client's point of view, and mServer is in server's point of
2002         // view.  So the difference between them is the "fudge factor"
2003         // between client and server views due to stop() and/or new
2004         // IAudioTrack.  And timestamp.mPosition is initially in server's
2005         // point of view, so we need to apply the same fudge factor to it.
2006     }
2007     return status;
2008 }
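// Editor's note (hedged usage sketch, not part of the original source): callers
// typically poll getTimestamp() and extrapolate the current playback position
// from the anchor it returns:
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         // ts.mPosition is a frame count on the client timeline and ts.mTime is
//         // the CLOCK_MONOTONIC time at which that frame was presented;
//         // "now - ts.mTime" scaled by the sample rate gives the frames presented
//         // since then.
//     }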
2009 
2010 String8 AudioTrack::getParameters(const String8& keys)
2011 {
2012     audio_io_handle_t output = getOutput();
2013     if (output != AUDIO_IO_HANDLE_NONE) {
2014         return AudioSystem::getParameters(output, keys);
2015     } else {
2016         return String8::empty();
2017     }
2018 }
2019 
2020 bool AudioTrack::isOffloaded() const
2021 {
2022     AutoMutex lock(mLock);
2023     return isOffloaded_l();
2024 }
2025 
2026 bool AudioTrack::isDirect() const
2027 {
2028     AutoMutex lock(mLock);
2029     return isDirect_l();
2030 }
2031 
2032 bool AudioTrack::isOffloadedOrDirect() const
2033 {
2034     AutoMutex lock(mLock);
2035     return isOffloadedOrDirect_l();
2036 }
2037 
2038 
2039 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2040 {
2041 
2042     const size_t SIZE = 256;
2043     char buffer[SIZE];
2044     String8 result;
2045 
2046     result.append(" AudioTrack::dump\n");
2047     snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2048             mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2049     result.append(buffer);
2050     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2051             mChannelCount, mFrameCount);
2052     result.append(buffer);
2053     snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2054     result.append(buffer);
2055     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2056     result.append(buffer);
2057     ::write(fd, result.string(), result.size());
2058     return NO_ERROR;
2059 }
2060 
2061 uint32_t AudioTrack::getUnderrunFrames() const
2062 {
2063     AutoMutex lock(mLock);
2064     return mProxy->getUnderrunFrames();
2065 }
2066 
2067 void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
2068     mAttributes.flags = 0x0;
2069 
2070     switch(streamType) {
2071     case AUDIO_STREAM_DEFAULT:
2072     case AUDIO_STREAM_MUSIC:
2073         mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
2074         mAttributes.usage = AUDIO_USAGE_MEDIA;
2075         break;
2076     case AUDIO_STREAM_VOICE_CALL:
2077         mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2078         mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
2079         break;
2080     case AUDIO_STREAM_ENFORCED_AUDIBLE:
2081         mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
2082         // intended fall through, attributes in common with STREAM_SYSTEM
2083     case AUDIO_STREAM_SYSTEM:
2084         mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2085         mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
2086         break;
2087     case AUDIO_STREAM_RING:
2088         mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2089         mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
2090         break;
2091     case AUDIO_STREAM_ALARM:
2092         mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2093         mAttributes.usage = AUDIO_USAGE_ALARM;
2094         break;
2095     case AUDIO_STREAM_NOTIFICATION:
2096         mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2097         mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
2098         break;
2099     case AUDIO_STREAM_BLUETOOTH_SCO:
2100         mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2101         mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
2102         mAttributes.flags |= AUDIO_FLAG_SCO;
2103         break;
2104     case AUDIO_STREAM_DTMF:
2105         mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2106         mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
2107         break;
2108     case AUDIO_STREAM_TTS:
2109         mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2110         mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
2111         break;
2112     default:
2113         ALOGE("invalid stream type %d when converting to attributes", streamType);
2114     }
2115 }
2116 
2117 void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
2118     // flags to stream type mapping
2119     if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
2120         mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
2121         return;
2122     }
2123     if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
2124         mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
2125         return;
2126     }
2127 
2128     // usage to stream type mapping
2129     switch (aa.usage) {
2130     case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2131         // TODO once AudioPolicyManager fully supports audio_attributes_t,
2132         //   remove stream change based on phone state
2133         if (AudioSystem::getPhoneState() == AUDIO_MODE_RINGTONE) {
2134             mStreamType = AUDIO_STREAM_RING;
2135             break;
2136         }
2137         // FALL THROUGH
2138     case AUDIO_USAGE_MEDIA:
2139     case AUDIO_USAGE_GAME:
2140     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2141         mStreamType = AUDIO_STREAM_MUSIC;
2142         return;
2143     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2144         mStreamType = AUDIO_STREAM_SYSTEM;
2145         return;
2146     case AUDIO_USAGE_VOICE_COMMUNICATION:
2147         mStreamType = AUDIO_STREAM_VOICE_CALL;
2148         return;
2149 
2150     case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2151         mStreamType = AUDIO_STREAM_DTMF;
2152         return;
2153 
2154     case AUDIO_USAGE_ALARM:
2155         mStreamType = AUDIO_STREAM_ALARM;
2156         return;
2157     case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2158         mStreamType = AUDIO_STREAM_RING;
2159         return;
2160 
2161     case AUDIO_USAGE_NOTIFICATION:
2162     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2163     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2164     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2165     case AUDIO_USAGE_NOTIFICATION_EVENT:
2166         mStreamType = AUDIO_STREAM_NOTIFICATION;
2167         return;
2168 
2169     case AUDIO_USAGE_UNKNOWN:
2170     default:
2171         mStreamType = AUDIO_STREAM_MUSIC;
2172     }
2173 }
2174 
2175 bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
2176     // has flags that map to a strategy?
2177     if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
2178         return true;
2179     }
2180 
2181     // has known usage?
2182     switch (paa->usage) {
2183     case AUDIO_USAGE_UNKNOWN:
2184     case AUDIO_USAGE_MEDIA:
2185     case AUDIO_USAGE_VOICE_COMMUNICATION:
2186     case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2187     case AUDIO_USAGE_ALARM:
2188     case AUDIO_USAGE_NOTIFICATION:
2189     case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2190     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2191     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2192     case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2193     case AUDIO_USAGE_NOTIFICATION_EVENT:
2194     case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2195     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2196     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2197     case AUDIO_USAGE_GAME:
2198         break;
2199     default:
2200         return false;
2201     }
2202     return true;
2203 }
2204 // =========================================================================
2205 
2206 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2207 {
2208     sp<AudioTrack> audioTrack = mAudioTrack.promote();
2209     if (audioTrack != 0) {
2210         AutoMutex lock(audioTrack->mLock);
2211         audioTrack->mProxy->binderDied();
2212     }
2213 }
2214 
2215 // =========================================================================
2216 
2217 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2218     : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2219       mIgnoreNextPausedInt(false)
2220 {
2221 }
2222 
2223 AudioTrack::AudioTrackThread::~AudioTrackThread()
2224 {
2225 }
2226 
2227 bool AudioTrack::AudioTrackThread::threadLoop()
2228 {
2229     {
2230         AutoMutex _l(mMyLock);
2231         if (mPaused) {
2232             mMyCond.wait(mMyLock);
2233             // caller will check for exitPending()
2234             return true;
2235         }
2236         if (mIgnoreNextPausedInt) {
2237             mIgnoreNextPausedInt = false;
2238             mPausedInt = false;
2239         }
2240         if (mPausedInt) {
2241             if (mPausedNs > 0) {
2242                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2243             } else {
2244                 mMyCond.wait(mMyLock);
2245             }
2246             mPausedInt = false;
2247             return true;
2248         }
2249     }
2250     if (exitPending()) {
2251         return false;
2252     }
2253     nsecs_t ns = mReceiver.processAudioBuffer();
2254     switch (ns) {
2255     case 0:
2256         return true;
2257     case NS_INACTIVE:
2258         pauseInternal();
2259         return true;
2260     case NS_NEVER:
2261         return false;
2262     case NS_WHENEVER:
2263         // FIXME increase poll interval, or make event-driven
2264         ns = 1000000000LL;
2265         // fall through
2266     default:
2267         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2268         pauseInternal(ns);
2269         return true;
2270     }
2271 }
2272 
2273 void AudioTrack::AudioTrackThread::requestExit()
2274 {
2275     // must be in this order to avoid a race condition
2276     Thread::requestExit();
2277     resume();
2278 }
2279 
2280 void AudioTrack::AudioTrackThread::pause()
2281 {
2282     AutoMutex _l(mMyLock);
2283     mPaused = true;
2284 }
2285 
2286 void AudioTrack::AudioTrackThread::resume()
2287 {
2288     AutoMutex _l(mMyLock);
2289     mIgnoreNextPausedInt = true;
2290     if (mPaused || mPausedInt) {
2291         mPaused = false;
2292         mPausedInt = false;
2293         mMyCond.signal();
2294     }
2295 }
2296 
2297 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2298 {
2299     AutoMutex _l(mMyLock);
2300     mPausedInt = true;
2301     mPausedNs = ns;
2302 }
2303 
2304 }; // namespace android
2305