• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 #define LOG_TAG "AudioHAL:AudioStreamOut"
19 
20 #include <utils/Log.h>
21 
22 #include "AudioHardwareOutput.h"
23 #include "AudioStreamOut.h"
24 
25 // Set to 1 to print timestamp data in CSV format.
26 #ifndef HAL_PRINT_TIMESTAMP_CSV
27 #define HAL_PRINT_TIMESTAMP_CSV 0
28 #endif
29 
30 //#define VERY_VERBOSE_LOGGING
31 #ifdef VERY_VERBOSE_LOGGING
32 #define ALOGVV ALOGV
33 #else
34 #define ALOGVV(a...) do { } while(0)
35 #endif
36 
37 namespace android {
38 
// Constructor.  |owner| is the HAL that owns this stream; |mcOut| selects
// the multi-channel (HDMI) output role rather than the primary stereo output.
AudioStreamOut::AudioStreamOut(AudioHardwareOutput& owner, bool mcOut)
    : mFramesPresented(0)
    , mFramesRendered(0)
    , mFramesWrittenRemainder(0)
    , mOwnerHAL(owner)
    , mFramesWritten(0)
    , mTgtDevices(0)
    , mAudioFlingerTgtDevices(0)
    , mIsMCOutput(mcOut)
    , mIsEncoded(false)
    , mInStandby(false)
    , mSPDIFEncoder(this)
{
    // The local clock is required for write throttling and timestamp math.
    assert(mLocalClock.initCheck());

    mPhysOutputs.setCapacity(3);

    // Set some reasonable defaults for these.  All of this should be eventually
    // be overwritten by a specific audio flinger configuration, but it does not
    // hurt to have something here by default.
    mInputSampleRate = 48000;
    mInputChanMask = AUDIO_CHANNEL_OUT_STEREO;
    mInputFormat = AUDIO_FORMAT_PCM_16_BIT;
    mInputNominalChunksInFlight = 4;
    updateInputNums();

    mThrottleValid = false;

    // Precompute the microseconds -> local-clock-ticks transform used by the
    // write throttle in finishedWriteOp().
    memset(&mUSecToLocalTime, 0, sizeof(mUSecToLocalTime));
    mUSecToLocalTime.a_to_b_numer = mLocalClock.getLocalFreq();
    mUSecToLocalTime.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mUSecToLocalTime.a_to_b_numer,
                            &mUSecToLocalTime.a_to_b_denom);
}
73 
AudioStreamOut::~AudioStreamOut()
{
    // Hand any physical outputs we still hold back to the owning HAL.
    releaseAllOutputs();
}
78 
set(audio_format_t * pFormat,uint32_t * pChannels,uint32_t * pRate)79 status_t AudioStreamOut::set(
80         audio_format_t *pFormat,
81         uint32_t *pChannels,
82         uint32_t *pRate)
83 {
84     Mutex::Autolock _l(mLock);
85     audio_format_t lFormat   = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
86     uint32_t       lChannels = pChannels ? *pChannels : 0;
87     uint32_t       lRate     = pRate ? *pRate : 0;
88 
89     // fix up defaults
90     if (lFormat == AUDIO_FORMAT_DEFAULT) lFormat = format();
91     if (lChannels == 0)                  lChannels = chanMask();
92     if (lRate == 0)                      lRate = sampleRate();
93 
94     if (pFormat)   *pFormat   = lFormat;
95     if (pChannels) *pChannels = lChannels;
96     if (pRate)     *pRate     = lRate;
97 
98     mIsEncoded = !audio_is_linear_pcm(lFormat);
99 
100     if (!mIsMCOutput && !mIsEncoded) {
101         // If this is the primary stream out, then demand our defaults.
102         if ((lFormat   != format()) ||
103             (lChannels != chanMask()) ||
104             (lRate     != sampleRate()))
105             return BAD_VALUE;
106     } else {
107         // Else check to see if our HDMI sink supports this format before proceeding.
108         if (!mOwnerHAL.getHDMIAudioCaps().supportsFormat(lFormat,
109                                                      lRate,
110                                                      audio_channel_count_from_out_mask(lChannels)))
111             return BAD_VALUE;
112     }
113 
114     mInputFormat = lFormat;
115     mInputChanMask = lChannels;
116     mInputSampleRate = lRate;
117     ALOGI("AudioStreamOut::set: lRate = %u, mIsEncoded = %d\n", lRate, mIsEncoded);
118     updateInputNums();
119 
120     return NO_ERROR;
121 }
122 
setTgtDevices(uint32_t tgtDevices)123 void AudioStreamOut::setTgtDevices(uint32_t tgtDevices)
124 {
125     Mutex::Autolock _l(mRoutingLock);
126     if (mTgtDevices != tgtDevices) {
127         mTgtDevices = tgtDevices;
128     }
129 }
130 
// Put the stream into standby: drop all physical outputs and notify the HAL.
// mFramesRendered resets here, but mFramesPresented deliberately does not —
// see the get_presentation_position contract noted above
// getPresentationPosition().
status_t AudioStreamOut::standby()
{
    mFramesRendered = 0;
    releaseAllOutputs();
    // Second arg tells the HAL whether this is the multi-channel stream.
    mOwnerHAL.standbyStatusUpdate(true, mIsMCOutput);
    mInStandby = true;

    return NO_ERROR;
}
140 
releaseAllOutputs()141 void AudioStreamOut::releaseAllOutputs() {
142     Mutex::Autolock _l(mRoutingLock);
143 
144     ALOGI("releaseAllOutputs: releasing %d mPhysOutputs", mPhysOutputs.size());
145     AudioOutputList::iterator I;
146     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
147         mOwnerHAL.releaseOutput(*this, *I);
148 
149     mPhysOutputs.clear();
150 }
151 
// Recompute all derived input-side bookkeeping (channel count, chunk frames,
// buffer size, nominal latency, and the local-time <-> frames transform) from
// the current input configuration.  Called whenever the sample rate, channel
// mask, or rate multiplier may have changed.
void AudioStreamOut::updateInputNums()
{
    assert(mLocalClock.initCheck());

    // mInputBufSize determines how many audio frames AudioFlinger is going to
    // mix at a time.  We also use the mInputBufSize to determine the ALSA
    // period_size, the number of samples which need to play out (at most)
    // before low level ALSA driver code is required to wake up upper levels of
    // SW to fill a new buffer.  As it turns out, ALSA is going to apply some
    // rules and modify the period_size which we pass to it.  One of the things
    // ALSA seems to do is attempt to round the period_size up to a value which
    // will make the period an integral number of 0.5 mSec.  This round-up
    // behavior can cause the low levels of ALSA to consume more data per period
    // than the AudioFlinger mixer has been told to produce.  If there are only
    // two buffers in flight at any given point in time, this can lead to a
    // situation where the pipeline ends up slipping an extra buffer and
    // underflowing.  There are two approaches to mitigate this, both of which
    // are implemented in this HAL...
    //
    // 1) Try as hard as possible to make certain that the buffer size we choose
    //    results in a period_size which is not going to get rounded up by ALSA.
    //    This means that we want a buffer size which at the chosen sample rate
    //    and frame size will be an integral multiple of 1/2 mSec.
    // 2) Increase the number of chunks we keep in flight.  If the system slips
    //    a single period, it's only really a problem if there is no data left in
    //    the pipeline waiting to be played out.  The mixer should mix
    //    as fast as possible until the buffer has been topped off.  By
    //    decreasing the buffer size and increasing the number of buffers in
    //    flight, we increase the number of interrupts and mix events per second,
    //    but buy ourselves some insurance against the negative side effects of
    //    slipping one buffer in the schedule.  We end up using 4 buffers at
    //    10mSec, making the total audio latency somewhere between 40 and 50
    //    mSec, depending on when a sample begins playback relative to
    //    AudioFlinger's mixing schedule.
    //
    mInputChanCount = audio_channel_count_from_out_mask(mInputChanMask);

    // Picking a chunk duration of 10mSec should satisfy #1 for both major
    // families of audio sample rates (the 44.1K and 48K families).  In the case
    // of 44.1 (or higher) we will end up with a multiple of 441 frames of audio
    // per chunk, while for 48K, we will have a multiple of 480 frames of audio
    // per chunk.  This will not work well for lower sample rates in the 44.1
    // family (22.05K and 11.025K); it is unlikely that we will ever be
    // configured to deliver those rates, and if we ever do, we will need to
    // rely on having extra chunks in flight to deal with the jitter problem
    // described above.
    mInputChunkFrames = outputSampleRate() / 100;

    // FIXME: Currently, audio flinger demands an input buffer size which is a
    // multiple of 16 audio frames.  Right now, there is no good way to
    // reconcile this with ALSA round-up behavior described above when the
    // desired sample rate is a member of the 44.1 family.  For now, we just
    // round up to the nearest multiple of 16 frames and roll the dice, but
    // someday it would be good to fix one or the other halves of the problem
    // (either ALSA or AudioFlinger)
    mInputChunkFrames = (mInputChunkFrames + 0xF) & ~0xF;

    ALOGD("AudioStreamOut::updateInputNums: chunk size %u from output rate %u\n",
        mInputChunkFrames, outputSampleRate());

    // Buffer size is just the frame size multiplied by the number of
    // frames per chunk.
    mInputBufSize = mInputChunkFrames * getBytesPerOutputFrame();

    // The nominal latency is just the duration of a chunk * the number of
    // chunks we nominally keep in flight at any given point in time.
    mInputNominalLatencyUSec = static_cast<uint32_t>(((
                    static_cast<uint64_t>(mInputChunkFrames)
                    * 1000000 * mInputNominalChunksInFlight)
                    / mInputSampleRate));

    // Transform mapping local-clock ticks to input-rate audio frames; used by
    // the write throttle and the DMA-start alignment code.
    memset(&mLocalTimeToFrames, 0, sizeof(mLocalTimeToFrames));
    mLocalTimeToFrames.a_to_b_numer = mInputSampleRate;
    mLocalTimeToFrames.a_to_b_denom = mLocalClock.getLocalFreq();
    LinearTransform::reduce(
            &mLocalTimeToFrames.a_to_b_numer,
            &mLocalTimeToFrames.a_to_b_denom);
}
229 
finishedWriteOp(size_t framesWritten,bool needThrottle)230 void AudioStreamOut::finishedWriteOp(size_t framesWritten,
231                                      bool needThrottle)
232 {
233     assert(mLocalClock.initCheck());
234 
235     int64_t now = mLocalClock.getLocalTime();
236 
237     if (!mThrottleValid || !needThrottle) {
238         mThrottleValid = true;
239         mWriteStartLT  = now;
240         mFramesWritten = 0;
241     }
242 
243     size_t framesWrittenAppRate;
244     uint32_t multiplier = getRateMultiplier();
245     if (multiplier != 1) {
246         // Accumulate round-off error from previous call.
247         framesWritten += mFramesWrittenRemainder;
248         // Scale from device sample rate to application rate.
249         framesWrittenAppRate = framesWritten / multiplier;
250         ALOGV("finishedWriteOp() framesWrittenAppRate = %d = %d / %d\n",
251             framesWrittenAppRate, framesWritten, multiplier);
252         // Save remainder for next time to prevent error accumulation.
253         mFramesWrittenRemainder = framesWritten - (framesWrittenAppRate * multiplier);
254     } else {
255         framesWrittenAppRate = framesWritten;
256     }
257 
258     mFramesWritten += framesWrittenAppRate;
259     mFramesPresented += framesWrittenAppRate;
260     mFramesRendered += framesWrittenAppRate;
261 
262     if (needThrottle) {
263         int64_t deltaLT;
264         mLocalTimeToFrames.doReverseTransform(mFramesWritten, &deltaLT);
265         deltaLT += mWriteStartLT;
266         deltaLT -= now;
267 
268         int64_t deltaUSec;
269         mUSecToLocalTime.doReverseTransform(deltaLT, &deltaUSec);
270 
271         if (deltaUSec > 0) {
272             useconds_t sleep_time;
273 
274             // We should never be a full second ahead of schedule; sanity check
275             // our throttle time and cap the max sleep time at 1 second.
276             if (deltaUSec > 1000000)
277                 sleep_time = 1000000;
278             else
279                 sleep_time = static_cast<useconds_t>(deltaUSec);
280 
281             usleep(sleep_time);
282         }
283     }
284 }
285 
// Parameter keys handled by set/getParameters().  keyRouting comes from
// AudioParameter; the "sup_*" keys are queried by AudioFlinger to discover
// this stream's capabilities.
static const String8 keyRouting(AudioParameter::keyRouting);
static const String8 keySupSampleRates("sup_sampling_rates");
static const String8 keySupFormats("sup_formats");
static const String8 keySupChannels("sup_channels");
setParameters(__unused struct audio_stream * stream,const char * kvpairs)290 status_t AudioStreamOut::setParameters(__unused struct audio_stream *stream, const char *kvpairs)
291 {
292     AudioParameter param = AudioParameter(String8(kvpairs));
293     String8 key = String8(AudioParameter::keyRouting);
294     int tmpInt;
295 
296     if (param.getInt(key, tmpInt) == NO_ERROR) {
297         // The audio HAL handles routing to physical devices entirely
298         // internally and mostly ignores what audio flinger tells it to do.  JiC
299         // there is something (now or in the future) in audio flinger which
300         // cares about the routing value in a call to getParameters, we hang on
301         // to the last routing value set by audio flinger so we can at least be
302         // consistent when we lie to the upper levels about doing what they told
303         // us to do.
304         mAudioFlingerTgtDevices = static_cast<uint32_t>(tmpInt);
305     }
306 
307     return NO_ERROR;
308 }
309 
getParameters(const char * k)310 char* AudioStreamOut::getParameters(const char* k)
311 {
312     AudioParameter param = AudioParameter(String8(k));
313     String8 value;
314 
315     if (param.get(keyRouting, value) == NO_ERROR) {
316         param.addInt(keyRouting, (int)mAudioFlingerTgtDevices);
317     }
318 
319     HDMIAudioCaps& hdmiCaps = mOwnerHAL.getHDMIAudioCaps();
320 
321     if (param.get(keySupSampleRates, value) == NO_ERROR) {
322         if (mIsMCOutput) {
323             hdmiCaps.getRatesForAF(value);
324             param.add(keySupSampleRates, value);
325         } else {
326             param.add(keySupSampleRates, String8("48000"));
327         }
328     }
329 
330     if (param.get(keySupFormats, value) == NO_ERROR) {
331         if (mIsMCOutput) {
332             hdmiCaps.getFmtsForAF(value);
333             param.add(keySupFormats, value);
334         } else {
335             param.add(keySupFormats, String8("AUDIO_FORMAT_PCM_16_BIT"));
336         }
337     }
338 
339     if (param.get(keySupChannels, value) == NO_ERROR) {
340         if (mIsMCOutput) {
341             hdmiCaps.getChannelMasksForAF(value, false);
342             param.add(keySupChannels, value);
343         } else {
344             param.add(keySupChannels, String8("AUDIO_CHANNEL_OUT_STEREO"));
345         }
346     }
347 
348     return strdup(param.toString().string());
349 }
350 
getRateMultiplier() const351 uint32_t AudioStreamOut::getRateMultiplier() const
352 {
353     return (mIsEncoded) ? mSPDIFEncoder.getRateMultiplier() : 1;
354 }
355 
outputSampleRate() const356 uint32_t AudioStreamOut::outputSampleRate() const
357 {
358     return mInputSampleRate * getRateMultiplier();
359 }
360 
getBytesPerOutputFrame()361 int AudioStreamOut::getBytesPerOutputFrame()
362 {
363     return (mIsEncoded) ? mSPDIFEncoder.getBytesPerOutputFrame()
364         : (mInputChanCount * sizeof(int16_t));
365 }
366 
latency() const367 uint32_t AudioStreamOut::latency() const {
368     uint32_t uSecLatency = mInputNominalLatencyUSec;
369     uint32_t vcompDelay = mOwnerHAL.getVideoDelayCompUsec();
370 
371     if (uSecLatency < vcompDelay)
372         return 0;
373 
374     return ((uSecLatency - vcompDelay) / 1000);
375 }
376 
// Used to implement get_presentation_position() for Audio HAL.
// According to the prototype in audio.h, the frame count should not get
// reset on standby().
status_t AudioStreamOut::getPresentationPosition(uint64_t *frames,
        struct timespec *timestamp)
{
    Mutex::Autolock _l(mRoutingLock);
    status_t result = -ENODEV;
    // The presentation timestamp should be the same for all devices.
    // Also Molly only has one output device at the moment.
    // So just use the first one in the list.
    if (!mPhysOutputs.isEmpty()) {
        // Reject obviously bogus avail counts from the driver
        // (> 10 seconds of audio at 48kHz).
        const unsigned int kInsaneAvail = 10 * 48000;
        unsigned int avail = 0;
        sp<AudioOutput> audioOutput = mPhysOutputs.itemAt(0);
        if (audioOutput->getHardwareTimestamp(&avail, timestamp) == 0) {
            if (avail < kInsaneAvail) {
                // FIXME av sync fudge factor
                // Use a fudge factor to account for hidden buffering in the
                // HDMI output path. This is a hack until we can determine the
                // actual buffer sizes.
                // Increasing kFudgeMSec will move the audio earlier in
                // relation to the video.
                const int kFudgeMSec = 50;
                int fudgeFrames = kFudgeMSec * sampleRate() / 1000;

                // Scale the frames in the driver because it might be running at
                // a higher rate for EAC3.
                int64_t framesInDriverBuffer =
                    (int64_t)audioOutput->getKernelBufferSize() - (int64_t)avail;
                framesInDriverBuffer = framesInDriverBuffer / getRateMultiplier();

                // Frames handed to us but not yet audible: what is still in
                // the driver buffer plus the fudge above.
                int64_t pendingFrames = framesInDriverBuffer + fudgeFrames;
                int64_t signedFrames = mFramesPresented - pendingFrames;
                if (pendingFrames < 0) {
                    ALOGE("getPresentationPosition: negative pendingFrames = %lld",
                        pendingFrames);
                } else if (signedFrames < 0) {
                    // More is pending than we have presented; we are still in
                    // the silent preroll before the first samples are audible.
                    ALOGI("getPresentationPosition: playing silent preroll"
                        ", mFramesPresented = %llu, pendingFrames = %lld",
                        mFramesPresented, pendingFrames);
                } else {
#if HAL_PRINT_TIMESTAMP_CSV
                    // Print comma separated values for spreadsheet analysis.
                    uint64_t nanos = (((uint64_t)timestamp->tv_sec) * 1000000000L)
                            + timestamp->tv_nsec;
                    ALOGI("getPresentationPosition, %lld, %4u, %lld, %llu",
                            mFramesPresented, avail, signedFrames, nanos);
#endif
                    *frames = (uint64_t) signedFrames;
                    result = NO_ERROR;
                }
            } else {
                ALOGE("getPresentationPosition: avail too large = %u", avail);
            }
        } else {
            ALOGE("getPresentationPosition: getHardwareTimestamp returned non-zero");
        }
    } else {
        ALOGVV("getPresentationPosition: no physical outputs! This HAL is inactive!");
    }
    return result;
}
440 
getRenderPosition(__unused uint32_t * dspFrames)441 status_t AudioStreamOut::getRenderPosition(__unused uint32_t *dspFrames)
442 {
443     if (dspFrames == NULL) {
444         return -EINVAL;
445     }
446     if (mPhysOutputs.isEmpty()) {
447         *dspFrames = 0;
448         return -ENODEV;
449     }
450     *dspFrames = (uint32_t) mFramesRendered;
451     return NO_ERROR;
452 }
453 
// Reconcile the set of physical outputs we actually hold (mPhysOutputs)
// with the set we are supposed to be driving (mTgtDevices): release outputs
// no longer wanted and attempt to obtain newly wanted ones.  Runs on the
// writer thread, under mRoutingLock.
void AudioStreamOut::updateTargetOutputs()
{
    Mutex::Autolock _l(mRoutingLock);

    AudioOutputList::iterator I;
    uint32_t cur_outputs = 0;

    // Build the device mask of the outputs we currently hold.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
        cur_outputs |= (*I)->devMask();

    if (cur_outputs == mTgtDevices)
        return;

    uint32_t outputsToObtain  = mTgtDevices & ~cur_outputs;
    uint32_t outputsToRelease = cur_outputs & ~mTgtDevices;

    // Start by releasing any outputs we should no longer have back to the HAL.
    if (outputsToRelease) {

        I = mPhysOutputs.begin();
        while (I != mPhysOutputs.end()) {
            if (!(outputsToRelease & (*I)->devMask())) {
                ++I;
                continue;
            }

            outputsToRelease &= ~((*I)->devMask());
            mOwnerHAL.releaseOutput(*this, *I);
            I = mPhysOutputs.erase(I);
        }
    }

    if (outputsToRelease) {
        ALOGW("Bookkeeping error!  Still have outputs to release (%08x), but"
              " none of them appear to be in the mPhysOutputs list!",
              outputsToRelease);
    }

    // Now attempt to obtain any outputs we should be using, but are not
    // currently.
    if (outputsToObtain) {
        uint32_t mask;

        // Buffer configuration may need updating now that we have decoded
        // the start of a stream. For example, EAC3, needs 4X sampleRate.
        updateInputNums();

        // Walk each set bit of outputsToObtain, one device mask at a time.
        for (mask = 0x1; outputsToObtain; mask <<= 1) {
            if (!(mask & outputsToObtain))
                continue;

            sp<AudioOutput> newOutput;
            status_t res;

            res = mOwnerHAL.obtainOutput(*this, mask, &newOutput);
            outputsToObtain &= ~mask;

            if (OK != res) {
                // If we get an error back from obtain output, it means that
                // something went really wrong at a lower level (probably failed
                // to open the driver).  We should not try to obtain this output
                // again, at least until the next routing change.
                ALOGW("Failed to obtain output %08x for %s audio stream out."
                      " (res %d)", mask, getName(), res);
                mTgtDevices &= ~mask;
                continue;
            }

            if (newOutput != NULL) {
                // If we actually got an output, go ahead and add it to our list
                // of physical outputs.  The rest of the system will handle
                // starting it up.  If we didn't get an output, but also got no
                // error code, it just means that the output is currently busy
                // and should become available soon.
                ALOGI("updateTargetOutputs: adding output back to mPhysOutputs");
                mPhysOutputs.push_back(newOutput);
            }
        }
    }
}
534 
// Pad outputs that are still waiting for DMA to start so that audio written
// from now on becomes audible on every output at the same local time
// (maxTime).  Note that mLocalTimeToFrames' zero points are re-anchored here
// as a scratch transform for each output; its numer/denom ratio is unchanged.
void AudioStreamOut::adjustOutputs(int64_t maxTime)
{
    AudioOutputList::iterator I;

    // Check to see if any outputs are active and see what their buffer levels
    // are.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        if ((*I)->getState() == AudioOutput::DMA_START) {
            int64_t lastWriteTS = (*I)->getLastNextWriteTS();
            int64_t padAmt;

            // Convert the gap between this output's next-write time and
            // maxTime into a count of padding frames.
            mLocalTimeToFrames.a_zero = lastWriteTS;
            mLocalTimeToFrames.b_zero = 0;
            if (mLocalTimeToFrames.doForwardTransform(maxTime,
                                                      &padAmt)) {
                (*I)->adjustDelay(((int32_t)padAmt));
            }
        }
    }
}
555 
write(const void * buffer,size_t bytes)556 ssize_t AudioStreamOut::write(const void* buffer, size_t bytes)
557 {
558     uint8_t *data = (uint8_t *)buffer;
559     ALOGVV("AudioStreamOut::write(%u)   0x%02X, 0x%02X, 0x%02X, 0x%02X,"
560           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
561           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
562           " 0x%02X, 0x%02X, 0x%02X, 0x%02X ====",
563         bytes, data[0], data[1], data[2], data[3],
564         data[4], data[5], data[6], data[7],
565         data[8], data[9], data[10], data[11],
566         data[12], data[13], data[14], data[15]
567         );
568     if (mIsEncoded) {
569         return mSPDIFEncoder.write(buffer, bytes);
570     } else {
571         return writeInternal(buffer, bytes);
572     }
573 }
574 
writeInternal(const void * buffer,size_t bytes)575 ssize_t AudioStreamOut::writeInternal(const void* buffer, size_t bytes)
576 {
577     uint8_t *data = (uint8_t *)buffer;
578     ALOGVV("AudioStreamOut::write_l(%u) 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
579           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
580           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
581           " 0x%02X, 0x%02X, 0x%02X, 0x%02X",
582         bytes, data[0], data[1], data[2], data[3],
583         data[4], data[5], data[6], data[7],
584         data[8], data[9], data[10], data[11],
585         data[12], data[13], data[14], data[15]
586         );
587 
588     // Note: no lock is obtained here.  Calls to write and getNextWriteTimestamp
589     // happen only on the AudioFlinger mixer thread which owns this particular
590     // output stream, so there is no need to worry that there will be two
591     // threads in this instance method concurrently.
592     //
593     // In addition, only calls to write change the contents of the mPhysOutputs
594     // collection (during the call to updateTargetOutputs).  updateTargetOutputs
595     // will hold the routing lock during the operation, as should any reader of
596     // mPhysOutputs, unless the reader is a call to write or
597     // getNextWriteTimestamp (we know that it is safe for write and gnwt to read
598     // the collection because the only collection mutator is the same thread
599     // which calls write and gnwt).
600 
601     // If the stream is in standby, then the first write should bring it out
602     // of standby
603     if (mInStandby) {
604         mOwnerHAL.standbyStatusUpdate(false, mIsMCOutput);
605         mInStandby = false;
606     }
607 
608     updateTargetOutputs();
609 
610     // If any of our outputs is in the PRIMED state when ::write is called, it
611     // means one of two things.  First, it could be that the DMA output really
612     // has not started yet.  This is odd, but certainly not impossible.  The
613     // other possibility is that AudioFlinger is in its silence-pushing mode and
614     // is not calling getNextWriteTimestamp.  After an output is primed, its in
615     // GNWTS where the amt of padding to compensate for different DMA start
616     // times is taken into account.  Go ahead and force a call to GNWTS, just to
617     // be certain that we have checked recently and are not stuck in silence
618     // fill mode.  Failure to do this will cause the AudioOutput state machine
619     // to eventually give up on DMA starting and reset the output over and over
620     // again (spamming the log and producing general confusion).
621     //
622     // While we are in the process of checking our various output states, check
623     // to see if any outputs have made it to the ACTIVE state.  Pass this
624     // information along to the call to processOneChunk.  If any of our outputs
625     // are waiting to be primed while other outputs have made it to steady
626     // state, we need to change our priming behavior slightly.  Instead of
627     // filling an output's buffer completely, we want to fill it to slightly
628     // less than full and let the adjustDelay mechanism take care of the rest.
629     //
630     // Failure to do this during steady state operation will almost certainly
631     // lead to the new output being over-filled relative to the other outputs
632     // causing it to be slightly out of sync.
633     AudioOutputList::iterator I;
634     bool checkDMAStart = false;
635     bool hasActiveOutputs = false;
636     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
637         if (AudioOutput::PRIMED == (*I)->getState())
638             checkDMAStart = true;
639 
640         if ((*I)->getState() == AudioOutput::ACTIVE)
641             hasActiveOutputs = true;
642     }
643 
644     if (checkDMAStart) {
645         int64_t junk;
646         getNextWriteTimestamp_internal(&junk);
647     }
648 
649     // We always call processOneChunk on the outputs, as it is the
650     // tick for their state machines.
651     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
652         (*I)->processOneChunk((uint8_t *)buffer, bytes, hasActiveOutputs);
653     }
654 
655     // If we don't actually have any physical outputs to write to, just sleep
656     // for the proper amt of time in order to simulate the throttle that writing
657     // to the hardware would impose.
658     finishedWriteOp(bytes / getBytesPerOutputFrame(), (0 == mPhysOutputs.size()));
659 
660     return static_cast<ssize_t>(bytes);
661 }
662 
// Public HAL entry point; the real work is shared with writeInternal() via
// getNextWriteTimestamp_internal().
status_t AudioStreamOut::getNextWriteTimestamp(int64_t *timestamp)
{
    return getNextWriteTimestamp_internal(timestamp);
}
667 
// Compute the local time at which the next written audio will hit the
// speakers, aligning all physical outputs to the latest of their individual
// next-write times.  Returns INVALID_OPERATION if no output can answer yet.
status_t AudioStreamOut::getNextWriteTimestamp_internal(
        int64_t *timestamp)
{
    int64_t max_time = LLONG_MIN;
    bool    max_time_valid = false;
    bool    need_adjust = false;

    // Across all of our physical outputs, figure out the max time when
    // a write operation will hit the speakers.  Assume that if an
    // output cannot answer the question, its because it has never
    // started or because it has recently underflowed and needs to be
    // restarted.  If this is the case, we will need to prime the
    // pipeline with a chunk's worth of data before proceeding.
    // If any of the outputs indicate a discontinuity (meaning that the
    // DMA start time was valid and is now invalid, or was and is valid
    // but was different from before; almost certainly caused by a low
    // level underflow), then just stop now.  We will need to reset and
    // re-prime all of the outputs in order to make certain that the
    // lead-times on all of the outputs match.

    AudioOutputList::iterator I;
    bool discon = false;

    // Find the largest next write timestamp. The goal is to make EVERY
    // output have the same value, but we also need this to pass back
    // up the layers.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        int64_t tmp;
        if (OK == (*I)->getNextWriteTimestamp(&tmp, &discon)) {
            if (!max_time_valid || (max_time < tmp)) {
                max_time = tmp;
                max_time_valid = true;
            }
        }
    }

    // Check the state of each output and determine if we need to align them.
    // Make sure to do this after we have called each outputs'
    // getNextWriteTimestamp as the transition from PRIMED to DMA_START happens
    // there.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        if ((*I)->getState() == AudioOutput::DMA_START) {
            need_adjust = true;
            break;
        }
    }

    // At this point, if we still have not found at least one output
    // who knows when their data is going to hit the speakers, then we
    // just can't answer the getNextWriteTimestamp question and we
    // should give up.
    if (!max_time_valid) {
        return INVALID_OPERATION;
    }

    // Stuff silence into the non-aligned outputs so that the effective
    // timestamp is the same for all the outputs.
    if (need_adjust)
        adjustOutputs(max_time);

    // We are done. The time at which the next written audio should
    // hit the speakers is just max_time plus the maximum amt of delay
    // compensation in the system.
    *timestamp = max_time;
    return OK;
}
734 
735 #define DUMP(a...) \
736     snprintf(buffer, SIZE, a); \
737     buffer[SIZE - 1] = 0; \
738     result.append(buffer);
739 #define B2STR(b) b ? "true" : "false"
740 
dump(int fd)741 status_t AudioStreamOut::dump(int fd)
742 {
743     const size_t SIZE = 256;
744     char buffer[SIZE];
745     String8 result;
746     DUMP("\n%s AudioStreamOut::dump\n", getName());
747     DUMP("\tsample rate            : %d\n", sampleRate());
748     DUMP("\tbuffer size            : %d\n", bufferSize());
749     DUMP("\tchannel mask           : 0x%04x\n", chanMask());
750     DUMP("\tformat                 : %d\n", format());
751     DUMP("\tdevice mask            : 0x%04x\n", mTgtDevices);
752     DUMP("\tIn standby             : %s\n", mInStandby? "yes" : "no");
753 
754     mRoutingLock.lock();
755     AudioOutputList outSnapshot(mPhysOutputs);
756     mRoutingLock.unlock();
757 
758     AudioOutputList::iterator I;
759     for (I = outSnapshot.begin(); I != outSnapshot.end(); ++I)
760         (*I)->dump(result);
761 
762     ::write(fd, result.string(), result.size());
763 
764     return NO_ERROR;
765 }
766 
767 #undef B2STR
768 #undef DUMP
769 
770 }  // android
771