1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
19 #define LOG_TAG "AudioFlinger"
20 //#define LOG_NDEBUG 0
21 
22 #include <math.h>
23 #include <cutils/compiler.h>
24 #include <utils/Log.h>
25 
26 #include <private/media/AudioTrackShared.h>
27 
28 #include <common_time/cc_helper.h>
29 #include <common_time/local_clock.h>
30 
31 #include "AudioMixer.h"
32 #include "AudioFlinger.h"
33 #include "ServiceUtilities.h"
34 
35 #include <media/nbaio/Pipe.h>
36 #include <media/nbaio/PipeReader.h>
37 
38 // ----------------------------------------------------------------------------
39 
40 // Note: the following macro is used for extremely verbose logging messages.  In
41 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
42 // 0; but one side effect of this is to turn on all LOGV's as well.  Some messages
43 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
44 // turned on.  Do not uncomment the #def below unless you really know what you
45 // are doing and want to see all of the extremely verbose messages.
46 //#define VERY_VERY_VERBOSE_LOGGING
47 #ifdef VERY_VERY_VERBOSE_LOGGING
48 #define ALOGVV ALOGV
49 #else
50 #define ALOGVV(a...) do { } while(0)
51 #endif
52 
53 namespace android {
54 
55 // ----------------------------------------------------------------------------
56 //      TrackBase
57 // ----------------------------------------------------------------------------
58 
59 static volatile int32_t nextTrackId = 55;
60 
61 // TrackBase constructor must be called with AudioFlinger::mLock held
62 AudioFlinger::ThreadBase::TrackBase::TrackBase(
63             ThreadBase *thread,
64             const sp<Client>& client,
65             uint32_t sampleRate,
66             audio_format_t format,
67             audio_channel_mask_t channelMask,
68             size_t frameCount,
69             const sp<IMemory>& sharedBuffer,
70             int sessionId,
71             bool isOut)
72     :   RefBase(),
73         mThread(thread),
74         mClient(client),
75         mCblk(NULL),
76         // mBuffer
77         // mBufferEnd
78         mStepCount(0),
79         mState(IDLE),
80         mSampleRate(sampleRate),
81         mFormat(format),
82         mChannelMask(channelMask),
83         mChannelCount(popcount(channelMask)),
84         mFrameSize(audio_is_linear_pcm(format) ?
85                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
86         mFrameCount(frameCount),
87         mStepServerFailed(false),
88         mSessionId(sessionId),
89         mIsOut(isOut),
90         mServerProxy(NULL),
91         mId(android_atomic_inc(&nextTrackId))
92 {
93     // client == 0 implies sharedBuffer == 0
94     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
95 
96     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
97             sharedBuffer->size());
98 
99     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
100     size_t size = sizeof(audio_track_cblk_t);
101     size_t bufferSize = frameCount * mFrameSize;
102     if (sharedBuffer == 0) {
103         size += bufferSize;
104     }
105 
106     if (client != 0) {
107         mCblkMemory = client->heap()->allocate(size);
108         if (mCblkMemory != 0) {
109             mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
110             // can't assume mCblk != NULL
111         } else {
112             ALOGE("not enough memory for AudioTrack size=%u", size);
113             client->heap()->dump("AudioTrack");
114             return;
115         }
116     } else {
117         // this syntax avoids calling the audio_track_cblk_t constructor twice
118         mCblk = (audio_track_cblk_t *) new uint8_t[size];
119         // assume mCblk != NULL
120     }
121 
122     // construct the shared structure in-place.
123     if (mCblk != NULL) {
124         new(mCblk) audio_track_cblk_t();
125         // clear all buffers
126         mCblk->frameCount_ = frameCount;
127 // uncomment the following lines to quickly test 32-bit wraparound
128 //      mCblk->user = 0xffff0000;
129 //      mCblk->server = 0xffff0000;
130 //      mCblk->userBase = 0xffff0000;
131 //      mCblk->serverBase = 0xffff0000;
132         if (sharedBuffer == 0) {
133             mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
134             memset(mBuffer, 0, bufferSize);
135             // Force underrun condition to avoid false underrun callback until first data is
136             // written to buffer (other flags are cleared)
137             mCblk->flags = CBLK_UNDERRUN;
138         } else {
139             mBuffer = sharedBuffer->pointer();
140         }
141         mBufferEnd = (uint8_t *)mBuffer + bufferSize;
142         mServerProxy = new ServerProxy(mCblk, mBuffer, frameCount, mFrameSize, isOut);
143 
144 #ifdef TEE_SINK
145         if (mTeeSinkTrackEnabled) {
146             NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
147             if (pipeFormat != Format_Invalid) {
148                 Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
149                 size_t numCounterOffers = 0;
150                 const NBAIO_Format offers[1] = {pipeFormat};
151                 ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
152                 ALOG_ASSERT(index == 0);
153                 PipeReader *pipeReader = new PipeReader(*pipe);
154                 numCounterOffers = 0;
155                 index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
156                 ALOG_ASSERT(index == 0);
157                 mTeeSink = pipe;
158                 mTeeSource = pipeReader;
159             }
160         }
161 #endif
162 
163     }
164 }
165 
166 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
167 {
168 #ifdef TEE_SINK
169     dumpTee(-1, mTeeSource, mId);
170 #endif
171     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
172     delete mServerProxy;
173     if (mCblk != NULL) {
174         if (mClient == 0) {
175             delete mCblk;
176         } else {
177             mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
178         }
179     }
180     mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
181     if (mClient != 0) {
182         // Client destructor must run with AudioFlinger mutex locked
183         Mutex::Autolock _l(mClient->audioFlinger()->mLock);
184         // If the client's reference count drops to zero, the associated destructor
185         // must run with AudioFlinger lock held. Thus the explicit clear() rather than
186         // relying on the automatic clear() at end of scope.
187         mClient.clear();
188     }
189 }
190 
191 // AudioBufferProvider interface
192 // getNextBuffer() = 0;
193 // This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
194 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
195 {
196 #ifdef TEE_SINK
197     if (mTeeSink != 0) {
198         (void) mTeeSink->write(buffer->raw, buffer->frameCount);
199     }
200 #endif
201 
202     buffer->raw = NULL;
203     mStepCount = buffer->frameCount;
204     // FIXME See note at getNextBuffer()
205     (void) step();      // ignore return value of step()
206     buffer->frameCount = 0;
207 }
208 
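// step() advances the server position in the control block by mStepCount frames (the
// count recorded by the most recent releaseBuffer()).  It returns false if the control
// block mutex could not be acquired; mStepServerFailed is then set so that
// getNextBuffer() retries the step later.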
209 bool AudioFlinger::ThreadBase::TrackBase::step() {
210     bool result = mServerProxy->step(mStepCount);
211     if (!result) {
212         ALOGV("stepServer failed acquiring cblk mutex");
213         mStepServerFailed = true;
214     }
215     return result;
216 }
217 
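// reset() rewinds the control block positions (user/server and their bases) to zero so
// that the track's buffer can be filled again from the start.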
218 void AudioFlinger::ThreadBase::TrackBase::reset() {
219     audio_track_cblk_t* cblk = this->cblk();
220 
221     cblk->user = 0;
222     cblk->server = 0;
223     cblk->userBase = 0;
224     cblk->serverBase = 0;
225     mStepServerFailed = false;
226     ALOGV("TrackBase::reset");
227 }
228 
229 uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
230     return mServerProxy->getSampleRate();
231 }
232 
233 void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
234     audio_track_cblk_t* cblk = this->cblk();
235     int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
236     int8_t *bufferEnd = bufferStart + frames * mFrameSize;
237 
238     // Check the validity of the returned pointer in case the track control block has been corrupted.
239     ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
240             "TrackBase::getBuffer buffer out of range:\n"
241                 "    start: %p, end %p , mBuffer %p mBufferEnd %p\n"
242                 "    server %u, serverBase %u, user %u, userBase %u, frameSize %u",
243                 bufferStart, bufferEnd, mBuffer, mBufferEnd,
244                 cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
245 
246     return bufferStart;
247 }
248 
249 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
250 {
251     mSyncEvents.add(event);
252     return NO_ERROR;
253 }
254 
255 // ----------------------------------------------------------------------------
256 //      Playback
257 // ----------------------------------------------------------------------------
258 
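// TrackHandle is the Binder object (BnAudioTrack) returned to the client process;
// it forwards each IAudioTrack call to the wrapped PlaybackThread::Track.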
259 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
260     : BnAudioTrack(),
261       mTrack(track)
262 {
263 }
264 
265 AudioFlinger::TrackHandle::~TrackHandle() {
266     // just stop the track on deletion; associated resources
267     // will be freed from the main thread once all pending buffers have
268     // been played. If the track is not in the active track list, however,
269     // we free everything now...
270     mTrack->destroy();
271 }
272 
273 sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
274     return mTrack->getCblk();
275 }
276 
277 status_t AudioFlinger::TrackHandle::start() {
278     return mTrack->start();
279 }
280 
281 void AudioFlinger::TrackHandle::stop() {
282     mTrack->stop();
283 }
284 
285 void AudioFlinger::TrackHandle::flush() {
286     mTrack->flush();
287 }
288 
289 void AudioFlinger::TrackHandle::pause() {
290     mTrack->pause();
291 }
292 
293 status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
294 {
295     return mTrack->attachAuxEffect(EffectId);
296 }
297 
298 status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
299                                                          sp<IMemory>* buffer) {
300     if (!mTrack->isTimedTrack())
301         return INVALID_OPERATION;
302 
303     PlaybackThread::TimedTrack* tt =
304             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
305     return tt->allocateTimedBuffer(size, buffer);
306 }
307 
308 status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
309                                                      int64_t pts) {
310     if (!mTrack->isTimedTrack())
311         return INVALID_OPERATION;
312 
313     PlaybackThread::TimedTrack* tt =
314             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
315     return tt->queueTimedBuffer(buffer, pts);
316 }
317 
318 status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
319     const LinearTransform& xform, int target) {
320 
321     if (!mTrack->isTimedTrack())
322         return INVALID_OPERATION;
323 
324     PlaybackThread::TimedTrack* tt =
325             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
326     return tt->setMediaTimeTransform(
327         xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
328 }
329 
330 status_t AudioFlinger::TrackHandle::onTransact(
331     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
332 {
333     return BnAudioTrack::onTransact(code, data, reply, flags);
334 }
335 
336 // ----------------------------------------------------------------------------
337 
338 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
339 AudioFlinger::PlaybackThread::Track::Track(
340             PlaybackThread *thread,
341             const sp<Client>& client,
342             audio_stream_type_t streamType,
343             uint32_t sampleRate,
344             audio_format_t format,
345             audio_channel_mask_t channelMask,
346             size_t frameCount,
347             const sp<IMemory>& sharedBuffer,
348             int sessionId,
349             IAudioFlinger::track_flags_t flags)
350     :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
351             sessionId, true /*isOut*/),
352     mFillingUpStatus(FS_INVALID),
353     // mRetryCount initialized later when needed
354     mSharedBuffer(sharedBuffer),
355     mStreamType(streamType),
356     mName(-1),  // see note below
357     mMainBuffer(thread->mixBuffer()),
358     mAuxBuffer(NULL),
359     mAuxEffectId(0), mHasVolumeController(false),
360     mPresentationCompleteFrames(0),
361     mFlags(flags),
362     mFastIndex(-1),
363     mUnderrunCount(0),
364     mCachedVolume(1.0),
365     mIsInvalid(false)
366 {
367     if (mCblk != NULL) {
368         // to avoid leaking a track name, do not allocate one unless there is an mCblk
369         mName = thread->getTrackName_l(channelMask, sessionId);
370         mCblk->mName = mName;
371         if (mName < 0) {
372             ALOGE("no more track names available");
373             return;
374         }
375         // only allocate a fast track index if we were able to allocate a normal track name
376         if (flags & IAudioFlinger::TRACK_FAST) {
377             ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
378             int i = __builtin_ctz(thread->mFastTrackAvailMask);
379             ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
380             // FIXME This is too eager.  We allocate a fast track index before the
381             //       fast track becomes active.  Since fast tracks are a scarce resource,
382             //       this means we are potentially denying other more important fast tracks from
383             //       being created.  It would be better to allocate the index dynamically.
384             mFastIndex = i;
385             mCblk->mName = i;
386             // Read the initial underruns because this field is never cleared by the fast mixer
387             mObservedUnderruns = thread->getFastTrackUnderruns(i);
388             thread->mFastTrackAvailMask &= ~(1 << i);
389         }
390     }
391     ALOGV("Track constructor name %d, calling pid %d", mName,
392             IPCThreadState::self()->getCallingPid());
393 }
394 
395 AudioFlinger::PlaybackThread::Track::~Track()
396 {
397     ALOGV("PlaybackThread::Track destructor");
398 }
399 
400 void AudioFlinger::PlaybackThread::Track::destroy()
401 {
402     // NOTE: destroyTrack_l() can remove a strong reference to this Track
403     // by removing it from the mTracks vector, so there is a risk that this Track's
404     // destructor is called. As the destructor needs to lock mLock,
405     // we must acquire a strong reference on this Track before locking mLock
406     // here so that the destructor is called only when exiting this function.
407     // On the other hand, as long as Track::destroy() is only called by
408     // TrackHandle destructor, the TrackHandle still holds a strong ref on
409     // this Track with its member mTrack.
410     sp<Track> keep(this);
411     { // scope for mLock
412         sp<ThreadBase> thread = mThread.promote();
413         if (thread != 0) {
414             if (!isOutputTrack()) {
415                 if (mState == ACTIVE || mState == RESUMING) {
416                     AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
417 
418 #ifdef ADD_BATTERY_DATA
419                     // to track the speaker usage
420                     addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
421 #endif
422                 }
423                 AudioSystem::releaseOutput(thread->id());
424             }
425             Mutex::Autolock _l(thread->mLock);
426             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
427             playbackThread->destroyTrack_l(this);
428         }
429     }
430 }
431 
432 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
433 {
434     result.append("   Name Client Type Fmt Chn mask   Session StpCnt fCount S F SRate  "
435                   "L dB  R dB    Server      User     Main buf    Aux Buf  Flags Underruns\n");
436 }
437 
438 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
439 {
440     uint32_t vlr = mServerProxy->getVolumeLR();
441     if (isFastTrack()) {
442         sprintf(buffer, "   F %2d", mFastIndex);
443     } else {
444         sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
445     }
446     track_state state = mState;
447     char stateChar;
448     switch (state) {
449     case IDLE:
450         stateChar = 'I';
451         break;
452     case TERMINATED:
453         stateChar = 'T';
454         break;
455     case STOPPING_1:
456         stateChar = 's';
457         break;
458     case STOPPING_2:
459         stateChar = '5';
460         break;
461     case STOPPED:
462         stateChar = 'S';
463         break;
464     case RESUMING:
465         stateChar = 'R';
466         break;
467     case ACTIVE:
468         stateChar = 'A';
469         break;
470     case PAUSING:
471         stateChar = 'p';
472         break;
473     case PAUSED:
474         stateChar = 'P';
475         break;
476     case FLUSHED:
477         stateChar = 'F';
478         break;
479     default:
480         stateChar = '?';
481         break;
482     }
483     char nowInUnderrun;
484     switch (mObservedUnderruns.mBitFields.mMostRecent) {
485     case UNDERRUN_FULL:
486         nowInUnderrun = ' ';
487         break;
488     case UNDERRUN_PARTIAL:
489         nowInUnderrun = '<';
490         break;
491     case UNDERRUN_EMPTY:
492         nowInUnderrun = '*';
493         break;
494     default:
495         nowInUnderrun = '?';
496         break;
497     }
498     snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
499             "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
500             (mClient == 0) ? getpid_cached : mClient->pid(),
501             mStreamType,
502             mFormat,
503             mChannelMask,
504             mSessionId,
505             mStepCount,
506             mFrameCount,
507             stateChar,
508             mFillingUpStatus,
509             mServerProxy->getSampleRate(),
510             20.0 * log10((vlr & 0xFFFF) / 4096.0),
511             20.0 * log10((vlr >> 16) / 4096.0),
512             mCblk->server,
513             mCblk->user,
514             (int)mMainBuffer,
515             (int)mAuxBuffer,
516             mCblk->flags,
517             mUnderrunCount,
518             nowInUnderrun);
519 }
520 
521 // AudioBufferProvider interface
522 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
523         AudioBufferProvider::Buffer* buffer, int64_t pts)
524 {
525     audio_track_cblk_t* cblk = this->cblk();
526     uint32_t framesReady;
527     uint32_t framesReq = buffer->frameCount;
528 
529     // Check if last stepServer failed, try to step now
530     if (mStepServerFailed) {
531         // FIXME When called by fast mixer, this takes a mutex with tryLock().
532         //       Since the fast mixer is higher priority than client callback thread,
533         //       it does not result in priority inversion for client.
534         //       But a non-blocking solution would be preferable to avoid
535         //       fast mixer being unable to tryLock(), and
536         //       to avoid the extra context switches if the client wakes up,
537         //       discovers the mutex is locked, then has to wait for fast mixer to unlock.
538         if (!step())  goto getNextBuffer_exit;
539         ALOGV("stepServer recovered");
540         mStepServerFailed = false;
541     }
542 
543     // FIXME Same as above
544     framesReady = mServerProxy->framesReady();
545 
546     if (CC_LIKELY(framesReady)) {
547         uint32_t s = cblk->server;
548         uint32_t bufferEnd = cblk->serverBase + mFrameCount;
549 
550         bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
551         if (framesReq > framesReady) {
552             framesReq = framesReady;
553         }
554         if (framesReq > bufferEnd - s) {
555             framesReq = bufferEnd - s;
556         }
557 
558         buffer->raw = getBuffer(s, framesReq);
559         buffer->frameCount = framesReq;
560         return NO_ERROR;
561     }
562 
563 getNextBuffer_exit:
564     buffer->raw = NULL;
565     buffer->frameCount = 0;
566     ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
567     return NOT_ENOUGH_DATA;
568 }
569 
570 // Note that framesReady() takes a mutex on the control block using tryLock().
571 // This could result in priority inversion if framesReady() is called by the normal mixer,
572 // as the normal mixer thread runs at lower
573 // priority than the client's callback thread:  there is a short window within framesReady()
574 // during which the normal mixer could be preempted, and the client callback would block.
575 // Another problem can occur if framesReady() is called by the fast mixer:
576 // the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
577 // FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
578 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
579     return mServerProxy->framesReady();
580 }
581 
582 // Don't call for fast tracks; the framesReady() could result in priority inversion
583 bool AudioFlinger::PlaybackThread::Track::isReady() const {
584     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
585         return true;
586     }
587 
588     if (framesReady() >= mFrameCount ||
589             (mCblk->flags & CBLK_FORCEREADY)) {
590         mFillingUpStatus = FS_FILLED;
591         android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
592         return true;
593     }
594     return false;
595 }
596 
597 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
598                                                     int triggerSession)
599 {
600     status_t status = NO_ERROR;
601     ALOGV("start(%d), calling pid %d session %d",
602             mName, IPCThreadState::self()->getCallingPid(), mSessionId);
603 
604     sp<ThreadBase> thread = mThread.promote();
605     if (thread != 0) {
606         Mutex::Autolock _l(thread->mLock);
607         track_state state = mState;
608         // here the track could be either new, or restarted
609         // in both cases "unstop" the track
610         if (state == PAUSED) {
611             mState = TrackBase::RESUMING;
612             ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
613         } else {
614             mState = TrackBase::ACTIVE;
615             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
616         }
617 
618         if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
619             thread->mLock.unlock();
620             status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
621             thread->mLock.lock();
622 
623 #ifdef ADD_BATTERY_DATA
624             // to track the speaker usage
625             if (status == NO_ERROR) {
626                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
627             }
628 #endif
629         }
630         if (status == NO_ERROR) {
631             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
632             playbackThread->addTrack_l(this);
633         } else {
634             mState = state;
635             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
636         }
637     } else {
638         status = BAD_VALUE;
639     }
640     return status;
641 }
642 
643 void AudioFlinger::PlaybackThread::Track::stop()
644 {
645     ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
646     sp<ThreadBase> thread = mThread.promote();
647     if (thread != 0) {
648         Mutex::Autolock _l(thread->mLock);
649         track_state state = mState;
650         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
651             // If the track is not active (PAUSED and buffers full), flush buffers
652             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
653             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
654                 reset();
655                 mState = STOPPED;
656             } else if (!isFastTrack()) {
657                 mState = STOPPED;
658             } else {
659                 // prepareTracks_l() will set state to STOPPING_2 after next underrun,
660                 // and then to STOPPED and reset() when presentation is complete
661                 mState = STOPPING_1;
662             }
663             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
664                     playbackThread);
665         }
666         if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
667             thread->mLock.unlock();
668             AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
669             thread->mLock.lock();
670 
671 #ifdef ADD_BATTERY_DATA
672             // to track the speaker usage
673             addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
674 #endif
675         }
676     }
677 }
678 
679 void AudioFlinger::PlaybackThread::Track::pause()
680 {
681     ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
682     sp<ThreadBase> thread = mThread.promote();
683     if (thread != 0) {
684         Mutex::Autolock _l(thread->mLock);
685         if (mState == ACTIVE || mState == RESUMING) {
686             mState = PAUSING;
687             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
688             if (!isOutputTrack()) {
689                 thread->mLock.unlock();
690                 AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
691                 thread->mLock.lock();
692 
693 #ifdef ADD_BATTERY_DATA
694                 // to track the speaker usage
695                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
696 #endif
697             }
698         }
699     }
700 }
701 
702 void AudioFlinger::PlaybackThread::Track::flush()
703 {
704     ALOGV("flush(%d)", mName);
705     sp<ThreadBase> thread = mThread.promote();
706     if (thread != 0) {
707         Mutex::Autolock _l(thread->mLock);
708         if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
709                 mState != PAUSING && mState != IDLE && mState != FLUSHED) {
710             return;
711         }
712         // No point remaining in PAUSED state after a flush => go to
713         // FLUSHED state
714         mState = FLUSHED;
715         // do not reset the track if it is still in the process of being stopped or paused.
716         // this will be done by prepareTracks_l() when the track is stopped.
717         // prepareTracks_l() will see mState == FLUSHED, then
718         // remove from active track list, reset(), and trigger presentation complete
719         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
720         if (playbackThread->mActiveTracks.indexOf(this) < 0) {
721             reset();
722         }
723     }
724 }
725 
726 void AudioFlinger::PlaybackThread::Track::reset()
727 {
728     // Do not reset twice to avoid discarding data written just after a flush and before
729     // the audioflinger thread detects the track is stopped.
730     if (!mResetDone) {
731         TrackBase::reset();
732         // Force underrun condition to avoid false underrun callback until first data is
733         // written to buffer
734         android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
735         android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
736         mFillingUpStatus = FS_FILLING;
737         mResetDone = true;
738         if (mState == FLUSHED) {
739             mState = IDLE;
740         }
741     }
742 }
743 
744 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
745 {
746     status_t status = DEAD_OBJECT;
747     sp<ThreadBase> thread = mThread.promote();
748     if (thread != 0) {
749         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
750         sp<AudioFlinger> af = mClient->audioFlinger();
751 
752         Mutex::Autolock _l(af->mLock);
753 
754         sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
755 
756         if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
757             Mutex::Autolock _dl(playbackThread->mLock);
758             Mutex::Autolock _sl(srcThread->mLock);
759             sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
760             if (chain == 0) {
761                 return INVALID_OPERATION;
762             }
763 
764             sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
765             if (effect == 0) {
766                 return INVALID_OPERATION;
767             }
768             srcThread->removeEffect_l(effect);
769             playbackThread->addEffect_l(effect);
770             // removeEffect_l() has stopped the effect if it was active so it must be restarted
771             if (effect->state() == EffectModule::ACTIVE ||
772                     effect->state() == EffectModule::STOPPING) {
773                 effect->start();
774             }
775 
776             sp<EffectChain> dstChain = effect->chain().promote();
777             if (dstChain == 0) {
778                 srcThread->addEffect_l(effect);
779                 return INVALID_OPERATION;
780             }
781             AudioSystem::unregisterEffect(effect->id());
782             AudioSystem::registerEffect(&effect->desc(),
783                                         srcThread->id(),
784                                         dstChain->strategy(),
785                                         AUDIO_SESSION_OUTPUT_MIX,
786                                         effect->id());
787         }
788         status = playbackThread->attachAuxEffect(this, EffectId);
789     }
790     return status;
791 }
792 
793 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
794 {
795     mAuxEffectId = EffectId;
796     mAuxBuffer = buffer;
797 }
798 
799 bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
800                                                          size_t audioHalFrames)
801 {
802     // a track is considered presented when the total number of frames written to audio HAL
803     // corresponds to the number of frames written when presentationComplete() is called for the
804     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
805     if (mPresentationCompleteFrames == 0) {
806         mPresentationCompleteFrames = framesWritten + audioHalFrames;
807         ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
808                   mPresentationCompleteFrames, audioHalFrames);
809     }
810     if (framesWritten >= mPresentationCompleteFrames) {
811         ALOGV("presentationComplete() session %d complete: framesWritten %d",
812                   mSessionId, framesWritten);
813         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
814         return true;
815     }
816     return false;
817 }
818 
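// Trigger and remove every queued SyncEvent whose type matches the requested type.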
819 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
820 {
821     for (int i = 0; i < (int)mSyncEvents.size(); i++) {
822         if (mSyncEvents[i]->type() == type) {
823             mSyncEvents[i]->trigger();
824             mSyncEvents.removeAt(i);
825             i--;
826         }
827     }
828 }
829 
830 // implement VolumeBufferProvider interface
831 
832 uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
833 {
834     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
835     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
836     uint32_t vlr = mServerProxy->getVolumeLR();
837     uint32_t vl = vlr & 0xFFFF;
838     uint32_t vr = vlr >> 16;
839     // track volumes come from shared memory, so can't be trusted and must be clamped
840     if (vl > MAX_GAIN_INT) {
841         vl = MAX_GAIN_INT;
842     }
843     if (vr > MAX_GAIN_INT) {
844         vr = MAX_GAIN_INT;
845     }
846     // now apply the cached master volume and stream type volume;
847     // this is trusted but lacks any synchronization or barrier so may be stale
848     float v = mCachedVolume;
849     vl *= v;
850     vr *= v;
851     // re-combine into U4.16
852     vlr = (vr << 16) | (vl & 0xFFFF);
853     // FIXME look at mute, pause, and stop flags
854     return vlr;
855 }
856 
857 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
858 {
859     if (mState == TERMINATED || mState == PAUSED ||
860             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
861                                       (mState == STOPPED)))) {
862         ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
863               mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
864         event->cancel();
865         return INVALID_OPERATION;
866     }
867     (void) TrackBase::setSyncEvent(event);
868     return NO_ERROR;
869 }
870 
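// invalidate() flags the control block as CBLK_INVALID and wakes the client, indicating
// that the track is no longer usable (the client is expected to re-create it).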
871 void AudioFlinger::PlaybackThread::Track::invalidate()
872 {
873     // FIXME should use proxy
874     android_atomic_or(CBLK_INVALID, &mCblk->flags);
875     mCblk->cv.signal();
876     mIsInvalid = true;
877 }
878 
879 // ----------------------------------------------------------------------------
880 
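// Factory for TimedTrack: construction is gated by Client::reserveTimedTrack(); returns
// 0 (no track) when the reservation fails.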
881 sp<AudioFlinger::PlaybackThread::TimedTrack>
882 AudioFlinger::PlaybackThread::TimedTrack::create(
883             PlaybackThread *thread,
884             const sp<Client>& client,
885             audio_stream_type_t streamType,
886             uint32_t sampleRate,
887             audio_format_t format,
888             audio_channel_mask_t channelMask,
889             size_t frameCount,
890             const sp<IMemory>& sharedBuffer,
891             int sessionId) {
892     if (!client->reserveTimedTrack())
893         return 0;
894 
895     return new TimedTrack(
896         thread, client, streamType, sampleRate, format, channelMask, frameCount,
897         sharedBuffer, sessionId);
898 }
899 
900 AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
901             PlaybackThread *thread,
902             const sp<Client>& client,
903             audio_stream_type_t streamType,
904             uint32_t sampleRate,
905             audio_format_t format,
906             audio_channel_mask_t channelMask,
907             size_t frameCount,
908             const sp<IMemory>& sharedBuffer,
909             int sessionId)
910     : Track(thread, client, streamType, sampleRate, format, channelMask,
911             frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
912       mQueueHeadInFlight(false),
913       mTrimQueueHeadOnRelease(false),
914       mFramesPendingInQueue(0),
915       mTimedSilenceBuffer(NULL),
916       mTimedSilenceBufferSize(0),
917       mTimedAudioOutputOnTime(false),
918       mMediaTimeTransformValid(false)
919 {
920     LocalClock lc;
921     mLocalTimeFreq = lc.getLocalFreq();
922 
923     mLocalTimeToSampleTransform.a_zero = 0;
924     mLocalTimeToSampleTransform.b_zero = 0;
925     mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
926     mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
927     LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
928                             &mLocalTimeToSampleTransform.a_to_b_denom);
929 
930     mMediaTimeToSampleTransform.a_zero = 0;
931     mMediaTimeToSampleTransform.b_zero = 0;
932     mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
933     mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
934     LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
935                             &mMediaTimeToSampleTransform.a_to_b_denom);
936 }
937 
938 AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
939     mClient->releaseTimedTrack();
940     delete [] mTimedSilenceBuffer;
941 }
942 
943 status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
944     size_t size, sp<IMemory>* buffer) {
945 
946     Mutex::Autolock _l(mTimedBufferQueueLock);
947 
948     trimTimedBufferQueue_l();
949 
950     // lazily initialize the shared memory heap for timed buffers
951     if (mTimedMemoryDealer == NULL) {
952         const int kTimedBufferHeapSize = 512 << 10;
953 
954         mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
955                                               "AudioFlingerTimed");
956         if (mTimedMemoryDealer == NULL)
957             return NO_MEMORY;
958     }
959 
960     sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
961     if (newBuffer == NULL) {
962         newBuffer = mTimedMemoryDealer->allocate(size);
963         if (newBuffer == NULL)
964             return NO_MEMORY;
965     }
966 
967     *buffer = newBuffer;
968     return NO_ERROR;
969 }
970 
971 // caller must hold mTimedBufferQueueLock
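// Drop queued timed buffers whose end PTS is already earlier than the current media
// time; a head buffer still in flight with the mixer is only marked for trimming and
// is removed later in releaseBuffer().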
972 void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
973     int64_t mediaTimeNow;
974     {
975         Mutex::Autolock mttLock(mMediaTimeTransformLock);
976         if (!mMediaTimeTransformValid)
977             return;
978 
979         int64_t targetTimeNow;
980         status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
981             ? mCCHelper.getCommonTime(&targetTimeNow)
982             : mCCHelper.getLocalTime(&targetTimeNow);
983 
984         if (OK != res)
985             return;
986 
987         if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
988                                                     &mediaTimeNow)) {
989             return;
990         }
991     }
992 
993     size_t trimEnd;
994     for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
995         int64_t bufEnd;
996 
997         if ((trimEnd + 1) < mTimedBufferQueue.size()) {
998             // We have a next buffer.  Just use its PTS as the PTS of the frame
999             // following the last frame in this buffer.  If the stream is sparse
1000             // (ie, there are deliberate gaps left in the stream which should be
1001             // filled with silence by the TimedAudioTrack), then this can result
1002             // in one extra buffer being left un-trimmed when it could have
1003             // been.  In general, this is not typical, and we would rather
1004             // optimize away the TS calculation below for the more common case
1005             // where PTSes are contiguous.
1006             bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
1007         } else {
1008             // We have no next buffer.  Compute the PTS of the frame following
1009             // the last frame in this buffer by computing the duration of
1010             // this frame in media time units and adding it to the PTS of the
1011             // buffer.
1012             int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
1013                                / mFrameSize;
1014 
1015             if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
1016                                                                 &bufEnd)) {
1017                 ALOGE("Failed to convert frame count of %lld to media time"
1018                       " duration" " (scale factor %d/%u) in %s",
1019                       frameCount,
1020                       mMediaTimeToSampleTransform.a_to_b_numer,
1021                       mMediaTimeToSampleTransform.a_to_b_denom,
1022                       __PRETTY_FUNCTION__);
1023                 break;
1024             }
1025             bufEnd += mTimedBufferQueue[trimEnd].pts();
1026         }
1027 
1028         if (bufEnd > mediaTimeNow)
1029             break;
1030 
1031         // Is the buffer we want to use in the middle of a mix operation right
1032         // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
1033         // from the mixer which should be coming back shortly.
1034         if (!trimEnd && mQueueHeadInFlight) {
1035             mTrimQueueHeadOnRelease = true;
1036         }
1037     }
1038 
1039     size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1040     if (trimStart < trimEnd) {
1041         // Update the bookkeeping for framesReady()
1042         for (size_t i = trimStart; i < trimEnd; ++i) {
1043             updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1044         }
1045 
1046         // Now actually remove the buffers from the queue.
1047         mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
1048     }
1049 }
1050 
1051 void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1052         const char* logTag) {
1053     ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1054                 "%s called (reason \"%s\"), but timed buffer queue has no"
1055                 " elements to trim.", __FUNCTION__, logTag);
1056 
1057     updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1058     mTimedBufferQueue.removeAt(0);
1059 }
1060 
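// Deduct the not-yet-consumed frames of a buffer that is being trimmed from
// mFramesPendingInQueue, so that framesReady() stays consistent with the queue.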
1061 void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1062         const TimedBuffer& buf,
1063         const char* logTag) {
1064     uint32_t bufBytes        = buf.buffer()->size();
1065     uint32_t consumedAlready = buf.position();
1066 
1067     ALOG_ASSERT(consumedAlready <= bufBytes,
1068                 "Bad bookkeeping while updating frames pending.  Timed buffer is"
1069                 " only %u bytes long, but claims to have consumed %u"
1070                 " bytes.  (update reason: \"%s\")",
1071                 bufBytes, consumedAlready, logTag);
1072 
1073     uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1074     ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1075                 "Bad bookkeeping while updating frames pending.  Should have at"
1076                 " least %u queued frames, but we think we have only %u.  (update"
1077                 " reason: \"%s\")",
1078                 bufFrames, mFramesPendingInQueue, logTag);
1079 
1080     mFramesPendingInQueue -= bufFrames;
1081 }
1082 
1083 status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1084     const sp<IMemory>& buffer, int64_t pts) {
1085 
1086     {
1087         Mutex::Autolock mttLock(mMediaTimeTransformLock);
1088         if (!mMediaTimeTransformValid)
1089             return INVALID_OPERATION;
1090     }
1091 
1092     Mutex::Autolock _l(mTimedBufferQueueLock);
1093 
1094     uint32_t bufFrames = buffer->size() / mFrameSize;
1095     mFramesPendingInQueue += bufFrames;
1096     mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1097 
1098     return NO_ERROR;
1099 }
1100 
1101 status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1102     const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1103 
1104     ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1105            xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1106            target);
1107 
1108     if (!(target == TimedAudioTrack::LOCAL_TIME ||
1109           target == TimedAudioTrack::COMMON_TIME)) {
1110         return BAD_VALUE;
1111     }
1112 
1113     Mutex::Autolock lock(mMediaTimeTransformLock);
1114     mMediaTimeTransform = xform;
1115     mMediaTimeTransformTarget = target;
1116     mMediaTimeTransformValid = true;
1117 
1118     return NO_ERROR;
1119 }
1120 
1121 #define min(a, b) ((a) < (b) ? (a) : (b))
1122 
1123 // implementation of getNextBuffer for tracks whose buffers have timestamps
1124 status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1125     AudioBufferProvider::Buffer* buffer, int64_t pts)
1126 {
1127     if (pts == AudioBufferProvider::kInvalidPTS) {
1128         buffer->raw = NULL;
1129         buffer->frameCount = 0;
1130         mTimedAudioOutputOnTime = false;
1131         return INVALID_OPERATION;
1132     }
1133 
1134     Mutex::Autolock _l(mTimedBufferQueueLock);
1135 
1136     ALOG_ASSERT(!mQueueHeadInFlight,
1137                 "getNextBuffer called without releaseBuffer!");
1138 
1139     while (true) {
1140 
1141         // if we have no timed buffers, then fail
1142         if (mTimedBufferQueue.isEmpty()) {
1143             buffer->raw = NULL;
1144             buffer->frameCount = 0;
1145             return NOT_ENOUGH_DATA;
1146         }
1147 
1148         TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1149 
1150         // calculate the PTS of the head of the timed buffer queue expressed in
1151         // local time
1152         int64_t headLocalPTS;
1153         {
1154             Mutex::Autolock mttLock(mMediaTimeTransformLock);
1155 
1156             ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1157 
1158             if (mMediaTimeTransform.a_to_b_denom == 0) {
1159                 // the transform represents a pause, so yield silence
1160                 timedYieldSilence_l(buffer->frameCount, buffer);
1161                 return NO_ERROR;
1162             }
1163 
1164             int64_t transformedPTS;
1165             if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1166                                                         &transformedPTS)) {
1167                 // the transform failed.  this shouldn't happen, but if it does
1168                 // then just drop this buffer
1169                 ALOGW("timedGetNextBuffer transform failed");
1170                 buffer->raw = NULL;
1171                 buffer->frameCount = 0;
1172                 trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1173                 return NO_ERROR;
1174             }
1175 
1176             if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1177                 if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1178                                                           &headLocalPTS)) {
1179                     buffer->raw = NULL;
1180                     buffer->frameCount = 0;
1181                     return INVALID_OPERATION;
1182                 }
1183             } else {
1184                 headLocalPTS = transformedPTS;
1185             }
1186         }
1187 
1188         // adjust the head buffer's PTS to reflect the portion of the head buffer
1189         // that has already been consumed
1190         int64_t effectivePTS = headLocalPTS +
1191                 ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate());
1192 
1193         // Calculate the delta in samples between the head of the input buffer
1194         // queue and the start of the next output buffer that will be written.
1195         // If the transformation fails because of over or underflow, it means
1196         // that the sample's position in the output stream is so far out of
1197         // whack that it should just be dropped.
1198         int64_t sampleDelta;
1199         if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1200             ALOGV("*** head buffer is too far from PTS: dropped buffer");
1201             trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1202                                        " mix");
1203             continue;
1204         }
1205         if (!mLocalTimeToSampleTransform.doForwardTransform(
1206                 (effectivePTS - pts) << 32, &sampleDelta)) {
1207             ALOGV("*** too late during sample rate transform: dropped buffer");
1208             trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1209             continue;
1210         }
1211 
1212         ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1213                " sampleDelta=[%d.%08x]",
1214                head.pts(), head.position(), pts,
1215                static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1216                    + (sampleDelta >> 32)),
1217                static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1218 
1219         // if the delta between the ideal placement for the next input sample and
1220         // the current output position is within this threshold, then we will
1221         // concatenate the next input samples to the previous output
1222         const int64_t kSampleContinuityThreshold =
1223                 (static_cast<int64_t>(sampleRate()) << 32) / 250;
1224 
1225         // if this is the first buffer of audio that we're emitting from this track
1226         // then it should be almost exactly on time.
1227         const int64_t kSampleStartupThreshold = 1LL << 32;
1228 
1229         if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1230            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1231             // the next input is close enough to being on time, so concatenate it
1232             // with the last output
1233             timedYieldSamples_l(buffer);
1234 
1235             ALOGVV("*** on time: head.pos=%d frameCount=%u",
1236                     head.position(), buffer->frameCount);
1237             return NO_ERROR;
1238         }
1239 
1240         // Looks like our output is not on time.  Reset our on-time status.
1241         // Next time we mix samples from our input queue, they should be within
1242         // the StartupThreshold.
1243         mTimedAudioOutputOnTime = false;
1244         if (sampleDelta > 0) {
1245             // the gap between the current output position and the proper start of
1246             // the next input sample is too big, so fill it with silence
1247             uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
1248 
1249             timedYieldSilence_l(framesUntilNextInput, buffer);
1250             ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1251             return NO_ERROR;
1252         } else {
1253             // the next input sample is late
1254             uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1255             size_t onTimeSamplePosition =
1256                     head.position() + lateFrames * mFrameSize;
1257 
1258             if (onTimeSamplePosition > head.buffer()->size()) {
1259                 // all the remaining samples in the head are too late, so
1260                 // drop it and move on
1261                 ALOGV("*** too late: dropped buffer");
1262                 trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1263                 continue;
1264             } else {
1265                 // skip over the late samples
1266                 head.setPosition(onTimeSamplePosition);
1267 
1268                 // yield the available samples
1269                 timedYieldSamples_l(buffer);
1270 
1271                 ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1272                 return NO_ERROR;
1273             }
1274         }
1275     }
1276 }
1277 
1278 // Yield samples from the timed buffer queue head up to the given output
1279 // buffer's capacity.
1280 //
1281 // Caller must hold mTimedBufferQueueLock
1282 void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1283     AudioBufferProvider::Buffer* buffer) {
1284 
1285     const TimedBuffer& head = mTimedBufferQueue[0];
1286 
1287     buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1288                    head.position());
1289 
1290     uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1291                                  mFrameSize);
1292     size_t framesRequested = buffer->frameCount;
1293     buffer->frameCount = min(framesLeftInHead, framesRequested);
1294 
1295     mQueueHeadInFlight = true;
1296     mTimedAudioOutputOnTime = true;
1297 }
1298 
1299 // Yield samples of silence up to the given output buffer's capacity
1300 //
1301 // Caller must hold mTimedBufferQueueLock
1302 void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1303     uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1304 
1305     // lazily allocate a buffer filled with silence
1306     if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1307         delete [] mTimedSilenceBuffer;
1308         mTimedSilenceBufferSize = numFrames * mFrameSize;
1309         mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1310         memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
1311     }
1312 
1313     buffer->raw = mTimedSilenceBuffer;
1314     size_t framesRequested = buffer->frameCount;
1315     buffer->frameCount = min(numFrames, framesRequested);
1316 
1317     mTimedAudioOutputOnTime = false;
1318 }
1319 
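// Note: the silence buffer above is allocated lazily and only ever grown; it
// is zero-filled once when (re)allocated, so later calls that need the same
// amount of silence or less reuse the existing allocation without touching it
// again.
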
1320 // AudioBufferProvider interface
1321 void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1322     AudioBufferProvider::Buffer* buffer) {
1323 
1324     Mutex::Autolock _l(mTimedBufferQueueLock);
1325 
1326     // If the buffer which was just released is part of the buffer at the head
1327     // of the queue, be sure to update the amount of the buffer which has been
1328     // consumed.  If the buffer being returned is not part of the head of the
1329     // queue, it is either because the buffer is part of the silence buffer, or
1330     // because the head of the timed queue was trimmed after the mixer called
1331     // getNextBuffer but before the mixer called releaseBuffer.
1332     if (buffer->raw == mTimedSilenceBuffer) {
1333         ALOG_ASSERT(!mQueueHeadInFlight,
1334                     "Queue head in flight during release of silence buffer!");
1335         goto done;
1336     }
1337 
1338     ALOG_ASSERT(mQueueHeadInFlight,
1339                 "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1340                 " head in flight.");
1341 
1342     if (mTimedBufferQueue.size()) {
1343         TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1344 
1345         void* start = head.buffer()->pointer();
1346         void* end   = reinterpret_cast<void*>(
1347                         reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1348                         + head.buffer()->size());
1349 
1350         ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1351                     "released buffer not within the head of the timed buffer"
1352                     " queue; qHead = [%p, %p], released buffer = %p",
1353                     start, end, buffer->raw);
1354 
1355         head.setPosition(head.position() +
1356                 (buffer->frameCount * mFrameSize));
1357         mQueueHeadInFlight = false;
1358 
1359         ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1360                     "Bad bookkeeping during releaseBuffer!  Should have at"
1361                     " least %u queued frames, but we think we have only %u",
1362                     buffer->frameCount, mFramesPendingInQueue);
1363 
1364         mFramesPendingInQueue -= buffer->frameCount;
1365 
1366         if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1367             || mTrimQueueHeadOnRelease) {
1368             trimTimedBufferQueueHead_l("releaseBuffer");
1369             mTrimQueueHeadOnRelease = false;
1370         }
1371     } else {
1372         LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1373                   " buffers in the timed buffer queue");
1374     }
1375 
1376 done:
1377     buffer->raw = 0;
1378     buffer->frameCount = 0;
1379 }
1380 
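// releaseBuffer() above distinguishes three cases: (1) the released pointer is
// the shared silence buffer, in which case no queue bookkeeping is needed;
// (2) it lies within the current queue head, so the head's consumed position
// and mFramesPendingInQueue are advanced, and the head is trimmed once fully
// consumed (or if a trim was deferred while the buffer was in flight); and
// (3) the queue is unexpectedly empty, which is treated as a fatal logic error.
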
1381 size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1382     Mutex::Autolock _l(mTimedBufferQueueLock);
1383     return mFramesPendingInQueue;
1384 }
1385 
1386 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1387         : mPTS(0), mPosition(0) {}
1388 
1389 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1390     const sp<IMemory>& buffer, int64_t pts)
1391         : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1392 
1393 
1394 // ----------------------------------------------------------------------------
1395 
1396 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1397             PlaybackThread *playbackThread,
1398             DuplicatingThread *sourceThread,
1399             uint32_t sampleRate,
1400             audio_format_t format,
1401             audio_channel_mask_t channelMask,
1402             size_t frameCount)
1403     :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
1404                 NULL, 0, IAudioFlinger::TRACK_DEFAULT),
1405     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
1406 {
1407 
1408     if (mCblk != NULL) {
1409         mOutBuffer.frameCount = 0;
1410         playbackThread->mTracks.add(this);
1411         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
1412                 "mCblk->frameCount_ %u, mChannelMask 0x%08x mBufferEnd %p",
1413                 mCblk, mBuffer,
1414                 mCblk->frameCount_, mChannelMask, mBufferEnd);
1415         // since client and server are in the same process,
1416         // the buffer has the same virtual address on both sides
1417         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
1418         mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
1419         mClientProxy->setSendLevel(0.0);
1420         mClientProxy->setSampleRate(sampleRate);
1421     } else {
1422         ALOGW("Error creating output track on thread %p", playbackThread);
1423     }
1424 }
1425 
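// Note: setVolumeLR() above packs the two channel gains into a single
// uint32_t, one gain per 16-bit half, expressed in 4.12 fixed point where
// 0x1000 is unity.  A minimal sketch of the packing used here (both channels
// at unity), with a hypothetical helper name:
//
//     static inline uint32_t packUnityVolumeLR() {
//         const uint16_t kUnity4_12 = 0x1000;               // 1.0 in 4.12 fixed point
//         return (static_cast<uint32_t>(kUnity4_12) << 16) | kUnity4_12;
//     }
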
1426 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1427 {
1428     clearBufferQueue();
1429     delete mClientProxy;
1430     // superclass destructor will now delete the server proxy and shared memory both refer to
1431 }
1432 
1433 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1434                                                           int triggerSession)
1435 {
1436     status_t status = Track::start(event, triggerSession);
1437     if (status != NO_ERROR) {
1438         return status;
1439     }
1440 
1441     mActive = true;
1442     mRetryCount = 127;
1443     return status;
1444 }
1445 
1446 void AudioFlinger::PlaybackThread::OutputTrack::stop()
1447 {
1448     Track::stop();
1449     clearBufferQueue();
1450     mOutBuffer.frameCount = 0;
1451     mActive = false;
1452 }
1453 
1454 bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1455 {
1456     Buffer *pInBuffer;
1457     Buffer inBuffer;
1458     uint32_t channelCount = mChannelCount;
1459     bool outputBufferFull = false;
1460     inBuffer.frameCount = frames;
1461     inBuffer.i16 = data;
1462 
1463     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1464 
1465     if (!mActive && frames != 0) {
1466         start();
1467         sp<ThreadBase> thread = mThread.promote();
1468         if (thread != 0) {
1469             MixerThread *mixerThread = (MixerThread *)thread.get();
1470             if (mFrameCount > frames) {
1471                 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1472                     uint32_t startFrames = (mFrameCount - frames);
1473                     pInBuffer = new Buffer;
1474                     pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1475                     pInBuffer->frameCount = startFrames;
1476                     pInBuffer->i16 = pInBuffer->mBuffer;
1477                     memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1478                     mBufferQueue.add(pInBuffer);
1479                 } else {
1480                     ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
1481                 }
1482             }
1483         }
1484     }
1485 
1486     while (waitTimeLeftMs) {
1487         // First write pending buffers, then new data
1488         if (mBufferQueue.size()) {
1489             pInBuffer = mBufferQueue.itemAt(0);
1490         } else {
1491             pInBuffer = &inBuffer;
1492         }
1493 
1494         if (pInBuffer->frameCount == 0) {
1495             break;
1496         }
1497 
1498         if (mOutBuffer.frameCount == 0) {
1499             mOutBuffer.frameCount = pInBuffer->frameCount;
1500             nsecs_t startTime = systemTime();
1501             if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
1502                 ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
1503                         mThread.unsafe_get());
1504                 outputBufferFull = true;
1505                 break;
1506             }
1507             uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1508             if (waitTimeLeftMs >= waitTimeMs) {
1509                 waitTimeLeftMs -= waitTimeMs;
1510             } else {
1511                 waitTimeLeftMs = 0;
1512             }
1513         }
1514 
1515         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1516                 pInBuffer->frameCount;
1517         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1518         mClientProxy->stepUser(outFrames);
1519         pInBuffer->frameCount -= outFrames;
1520         pInBuffer->i16 += outFrames * channelCount;
1521         mOutBuffer.frameCount -= outFrames;
1522         mOutBuffer.i16 += outFrames * channelCount;
1523 
1524         if (pInBuffer->frameCount == 0) {
1525             if (mBufferQueue.size()) {
1526                 mBufferQueue.removeAt(0);
1527                 delete [] pInBuffer->mBuffer;
1528                 delete pInBuffer;
1529                 ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1530                         mThread.unsafe_get(), mBufferQueue.size());
1531             } else {
1532                 break;
1533             }
1534         }
1535     }
1536 
1537     // If we could not write all frames, allocate a buffer and queue it for next time.
1538     if (inBuffer.frameCount) {
1539         sp<ThreadBase> thread = mThread.promote();
1540         if (thread != 0 && !thread->standby()) {
1541             if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1542                 pInBuffer = new Buffer;
1543                 pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1544                 pInBuffer->frameCount = inBuffer.frameCount;
1545                 pInBuffer->i16 = pInBuffer->mBuffer;
1546                 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1547                         sizeof(int16_t));
1548                 mBufferQueue.add(pInBuffer);
1549                 ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1550                         mThread.unsafe_get(), mBufferQueue.size());
1551             } else {
1552                 ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1553                         this, mThread.unsafe_get());
1554             }
1555         }
1556     }
1557 
1558     // Calling write() with a 0 length buffer means that no more data will be written:
1559     // If no more buffers are pending, fill output track buffer to make sure it is started
1560     // by output mixer.
1561     if (frames == 0 && mBufferQueue.size() == 0) {
1562         if (mCblk->user < mFrameCount) {
1563             frames = mFrameCount - mCblk->user;
1564             pInBuffer = new Buffer;
1565             pInBuffer->mBuffer = new int16_t[frames * channelCount];
1566             pInBuffer->frameCount = frames;
1567             pInBuffer->i16 = pInBuffer->mBuffer;
1568             memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1569             mBufferQueue.add(pInBuffer);
1570         } else if (mActive) {
1571             stop();
1572         }
1573     }
1574 
1575     return outputBufferFull;
1576 }
1577 
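// In summary, write() above drains any previously queued overflow buffers
// before the caller's data, copies as much as the shared-memory track buffer
// will accept within the duplicating thread's wait budget, and queues whatever
// is left over (up to kMaxOverFlowBuffers) for the next call.  A zero-length
// write acts as an end-of-stream flush: it pads the track with silence so the
// mixer will start it, or stops the track once nothing remains queued.  The
// return value is true only if the output track's buffer filled up before all
// frames could be written.
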
1578 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1579         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1580 {
1581     audio_track_cblk_t* cblk = mCblk;
1582     uint32_t framesReq = buffer->frameCount;
1583 
1584     ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
1585     buffer->frameCount  = 0;
1586 
1587     size_t framesAvail;
1588     {
1589         Mutex::Autolock _l(cblk->lock);
1590 
1591         // read the server count again
1592         while (!(framesAvail = mClientProxy->framesAvailable_l())) {
1593             if (CC_UNLIKELY(!mActive)) {
1594                 ALOGV("Not active and NO_MORE_BUFFERS");
1595                 return NO_MORE_BUFFERS;
1596             }
1597             status_t result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
1598             if (result != NO_ERROR) {
1599                 return NO_MORE_BUFFERS;
1600             }
1601         }
1602     }
1603 
1604     if (framesReq > framesAvail) {
1605         framesReq = framesAvail;
1606     }
1607 
1608     uint32_t u = cblk->user;
1609     uint32_t bufferEnd = cblk->userBase + mFrameCount;
1610 
1611     if (framesReq > bufferEnd - u) {
1612         framesReq = bufferEnd - u;
1613     }
1614 
1615     buffer->frameCount  = framesReq;
1616     buffer->raw         = mClientProxy->buffer(u);
1617     return NO_ERROR;
1618 }
1619 
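// obtainBuffer() above blocks on the control block's condition variable for at
// most waitTimeMs waiting for the mixer to free space, then clamps the request
// both to the frames actually available and to the end of the current buffer
// wrap, so a single call never returns a region that straddles the wrap point.
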
1620 
1621 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1622 {
1623     size_t size = mBufferQueue.size();
1624 
1625     for (size_t i = 0; i < size; i++) {
1626         Buffer *pBuffer = mBufferQueue.itemAt(i);
1627         delete [] pBuffer->mBuffer;
1628         delete pBuffer;
1629     }
1630     mBufferQueue.clear();
1631 }
1632 
1633 
1634 // ----------------------------------------------------------------------------
1635 //      Record
1636 // ----------------------------------------------------------------------------
1637 
1638 AudioFlinger::RecordHandle::RecordHandle(
1639         const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1640     : BnAudioRecord(),
1641     mRecordTrack(recordTrack)
1642 {
1643 }
1644 
1645 AudioFlinger::RecordHandle::~RecordHandle() {
1646     stop_nonvirtual();
1647     mRecordTrack->destroy();
1648 }
1649 
1650 sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
1651     return mRecordTrack->getCblk();
1652 }
1653 
1654 status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1655         int triggerSession) {
1656     ALOGV("RecordHandle::start()");
1657     return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1658 }
1659 
1660 void AudioFlinger::RecordHandle::stop() {
1661     stop_nonvirtual();
1662 }
1663 
1664 void AudioFlinger::RecordHandle::stop_nonvirtual() {
1665     ALOGV("RecordHandle::stop()");
1666     mRecordTrack->stop();
1667 }
1668 
1669 status_t AudioFlinger::RecordHandle::onTransact(
1670     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1671 {
1672     return BnAudioRecord::onTransact(code, data, reply, flags);
1673 }
1674 
1675 // ----------------------------------------------------------------------------
1676 
1677 // RecordTrack constructor must be called with AudioFlinger::mLock held
1678 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1679             RecordThread *thread,
1680             const sp<Client>& client,
1681             uint32_t sampleRate,
1682             audio_format_t format,
1683             audio_channel_mask_t channelMask,
1684             size_t frameCount,
1685             int sessionId)
1686     :   TrackBase(thread, client, sampleRate, format,
1687                   channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/),
1688         mOverflow(false)
1689 {
1690     ALOGV("RecordTrack constructor, size %d", (int)((uint8_t *)mBufferEnd - (uint8_t *)mBuffer));
1691 }
1692 
1693 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
1694 {
1695     ALOGV("%s", __func__);
1696 }
1697 
1698 // AudioBufferProvider interface
1699 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
1700         int64_t pts)
1701 {
1702     audio_track_cblk_t* cblk = this->cblk();
1703     uint32_t framesAvail;
1704     uint32_t framesReq = buffer->frameCount;
1705 
1706     // Check if last stepServer failed, try to step now
1707     if (mStepServerFailed) {
1708         if (!step()) {
1709             goto getNextBuffer_exit;
1710         }
1711         ALOGV("stepServer recovered");
1712         mStepServerFailed = false;
1713     }
1714 
1715     // FIXME lock is not actually held, so overrun is possible
1716     framesAvail = mServerProxy->framesAvailableIn_l();
1717 
1718     if (CC_LIKELY(framesAvail)) {
1719         uint32_t s = cblk->server;
1720         uint32_t bufferEnd = cblk->serverBase + mFrameCount;
1721 
1722         if (framesReq > framesAvail) {
1723             framesReq = framesAvail;
1724         }
1725         if (framesReq > bufferEnd - s) {
1726             framesReq = bufferEnd - s;
1727         }
1728 
1729         buffer->raw = getBuffer(s, framesReq);
1730         buffer->frameCount = framesReq;
1731         return NO_ERROR;
1732     }
1733 
1734 getNextBuffer_exit:
1735     buffer->raw = NULL;
1736     buffer->frameCount = 0;
1737     return NOT_ENOUGH_DATA;
1738 }
1739 
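// As in the playback path, the clamping above means getNextBuffer() returns at
// most one contiguous region per call; captured data that spans the wrap point
// of the shared buffer is delivered across two successive calls.
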
1740 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
1741                                                         int triggerSession)
1742 {
1743     sp<ThreadBase> thread = mThread.promote();
1744     if (thread != 0) {
1745         RecordThread *recordThread = (RecordThread *)thread.get();
1746         return recordThread->start(this, event, triggerSession);
1747     } else {
1748         return BAD_VALUE;
1749     }
1750 }
1751 
1752 void AudioFlinger::RecordThread::RecordTrack::stop()
1753 {
1754     sp<ThreadBase> thread = mThread.promote();
1755     if (thread != 0) {
1756         RecordThread *recordThread = (RecordThread *)thread.get();
1757         recordThread->mLock.lock();
1758         bool doStop = recordThread->stop_l(this);
1759         if (doStop) {
1760             TrackBase::reset();
1761             // Force overrun condition to avoid false overrun callback until first data is
1762             // read from buffer
1763             android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
1764         }
1765         recordThread->mLock.unlock();
1766         if (doStop) {
1767             AudioSystem::stopInput(recordThread->id());
1768         }
1769     }
1770 }
1771 
1772 void AudioFlinger::RecordThread::RecordTrack::destroy()
1773 {
1774     // see comments at AudioFlinger::PlaybackThread::Track::destroy()
1775     sp<RecordTrack> keep(this);
1776     {
1777         sp<ThreadBase> thread = mThread.promote();
1778         if (thread != 0) {
1779             if (mState == ACTIVE || mState == RESUMING) {
1780                 AudioSystem::stopInput(thread->id());
1781             }
1782             AudioSystem::releaseInput(thread->id());
1783             Mutex::Autolock _l(thread->mLock);
1784             RecordThread *recordThread = (RecordThread *) thread.get();
1785             recordThread->destroyTrack_l(this);
1786         }
1787     }
1788 }
1789 
1790 
1791 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
1792 {
1793     result.append("   Clien Fmt Chn mask   Session Step S Serv     User   FrameCount\n");
1794 }
1795 
1796 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
1797 {
1798     snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %08x %08x %05d\n",
1799             (mClient == 0) ? getpid_cached : mClient->pid(),
1800             mFormat,
1801             mChannelMask,
1802             mSessionId,
1803             mStepCount,
1804             mState,
1805             mCblk->server,
1806             mCblk->user,
1807             mFrameCount);
1808 }
1809 
1810 }; // namespace android
1811