1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 #define LOG_TAG "AudioHAL_AudioStreamOut"
19 
20 #include <inttypes.h>
21 #include <utils/Log.h>
22 
23 #include "AudioHardwareOutput.h"
24 #include "AudioStreamOut.h"
25 
26 // Set to 1 to print timestamp data in CSV format.
27 #ifndef HAL_PRINT_TIMESTAMP_CSV
28 #define HAL_PRINT_TIMESTAMP_CSV 0
29 #endif
30 
31 //#define VERY_VERBOSE_LOGGING
32 #ifdef VERY_VERBOSE_LOGGING
33 #define ALOGVV ALOGV
34 #else
35 #define ALOGVV(a...) do { } while(0)
36 #endif
37 
38 namespace android {
39 
40 AudioStreamOut::AudioStreamOut(AudioHardwareOutput& owner, bool mcOut, bool isIec958NonAudio)
41     : mRenderPosition(0)
42     , mFramesPresented(0)
43     , mLastPresentationPosition(0)
44     , mLastPresentationValid(false)
45     , mOwnerHAL(owner)
46     , mFramesWritten(0)
47     , mTgtDevices(0)
48     , mAudioFlingerTgtDevices(0)
49     , mIsMCOutput(mcOut)
50     , mInStandby(false)
51     , mIsIec958NonAudio(isIec958NonAudio)
52     , mReportedAvailFail(false)
53 {
54     assert(mLocalClock.initCheck());
55 
56     mPhysOutputs.setCapacity(3);
57 
58     // Set some reasonable defaults for these.  All of this should eventually
59     // be overwritten by a specific audio flinger configuration, but it does not
60     // hurt to have something here by default.
61     mInputSampleRate = 48000;
62     mInputChanMask = AUDIO_CHANNEL_OUT_STEREO;
63     mInputFormat = AUDIO_FORMAT_PCM_16_BIT;
64     mInputNominalChunksInFlight = 4; // pcm_open() fails if not 4!
65     updateInputNums();
66 
67     mThrottleValid = false;
68 
69     memset(&mUSecToLocalTime, 0, sizeof(mUSecToLocalTime));
70     mUSecToLocalTime.a_to_b_numer = mLocalClock.getLocalFreq();
71     mUSecToLocalTime.a_to_b_denom = 1000000;
72     LinearTransform::reduce(&mUSecToLocalTime.a_to_b_numer,
73                             &mUSecToLocalTime.a_to_b_denom);
74 }
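// The mUSecToLocalTime transform set up above maps microseconds onto
// local-clock ticks.  As a rough illustration, assuming a hypothetical
// 27 MHz local clock, LinearTransform::reduce() turns 27000000/1000000
// into 27/1, so a forward transform multiplies a duration in usec by 27
// to get ticks and the reverse transform (used by the write throttle
// below) divides ticks by 27 to get back to usec.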
75 
76 AudioStreamOut::~AudioStreamOut()
77 {
78     releaseAllOutputs();
79 }
80 
81 status_t AudioStreamOut::set(
82         audio_format_t *pFormat,
83         uint32_t *pChannels,
84         uint32_t *pRate)
85 {
86     Mutex::Autolock _l(mRoutingLock);
87     audio_format_t lFormat   = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
88     uint32_t       lChannels = pChannels ? *pChannels : 0;
89     uint32_t       lRate     = pRate ? *pRate : 0;
90 
91     // fix up defaults
92     if (lFormat == AUDIO_FORMAT_DEFAULT) lFormat = format();
93     if (lChannels == 0)                  lChannels = chanMask();
94     if (lRate == 0)                      lRate = sampleRate();
95 
96     if (pFormat)   *pFormat   = lFormat;
97     if (pChannels) *pChannels = lChannels;
98     if (pRate)     *pRate     = lRate;
99 
100     if (!audio_is_linear_pcm(lFormat)) {
101         ALOGW("set: format 0x%08X needs to be wrapped in SPDIF data burst", lFormat);
102         return BAD_VALUE;
103     }
104 
105     if (!mIsMCOutput) {
106         // If this is the primary stream out, then demand our defaults.
107         if ((lFormat != AUDIO_FORMAT_PCM_16_BIT && lFormat != AUDIO_FORMAT_PCM_8_24_BIT) ||
108             (lChannels != chanMask()) ||
109             (lRate     != sampleRate())) {
110             ALOGW("set: parameters incompatible with defaults");
111             return BAD_VALUE;
112         }
113     } else {
114         // Else check to see if our HDMI sink supports this format before proceeding.
115         if (!mOwnerHAL.getHDMIAudioCaps().supportsFormat(
116                 lFormat, lRate, audio_channel_count_from_out_mask(lChannels),
117                 mIsIec958NonAudio)) {
118             ALOGW("set: parameters incompatible with hdmi capabilities");
119             return BAD_VALUE;
120         }
121     }
122 
123     mInputFormat = lFormat;
124     mInputChanMask = lChannels;
125     mInputSampleRate = lRate;
126     ALOGI("AudioStreamOut::set: rate = %u, format = 0x%08X\n", lRate, lFormat);
127     updateInputNums();
128 
129     return NO_ERROR;
130 }
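// A hypothetical negotiation on the multi-channel stream: if AudioFlinger
// calls set() with AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_5POINT1 and
// 48000 Hz, the request is checked against the sink's advertised
// HDMIAudioCaps; if 6-channel PCM at 48 kHz is supported, the new values are
// latched into mInputFormat/mInputChanMask/mInputSampleRate, otherwise
// BAD_VALUE is returned and the stream keeps its previous configuration.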
131 
132 void AudioStreamOut::setTgtDevices(uint32_t tgtDevices)
133 {
134     Mutex::Autolock _l(mRoutingLock);
135     if (mTgtDevices != tgtDevices) {
136         mTgtDevices = tgtDevices;
137     }
138 }
139 
140 status_t AudioStreamOut::standbyHardware()
141 {
142     releaseAllOutputs();
143     mOwnerHAL.standbyStatusUpdate(true, mIsMCOutput);
144     mInStandby = true;
145     return NO_ERROR;
146 }
147 
148 status_t AudioStreamOut::standby()
149 {
150     ALOGI("standby: ==========================");
151     mRenderPosition = 0;
152     mLastPresentationValid = false;
153     // Don't reset the presentation position.
154     return standbyHardware();
155 }
156 
157 void AudioStreamOut::releaseAllOutputs() {
158     Mutex::Autolock _l(mRoutingLock);
159     ALOGI("releaseAllOutputs: releasing %d mPhysOutputs", mPhysOutputs.size());
160     AudioOutputList::iterator I;
161     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
162         mOwnerHAL.releaseOutput(*this, *I);
163 
164     mPhysOutputs.clear();
165 }
166 
167 status_t AudioStreamOut::pause()
168 {
169     ALOGI("pause: ==========================");
170     mLastPresentationValid = false;
171     return standbyHardware();
172 }
173 
174 status_t AudioStreamOut::resume()
175 {
176     ALOGI("resume: ==========================");
177     return NO_ERROR;
178 }
179 
180 status_t AudioStreamOut::flush()
181 {
182     ALOGI("flush: ==========================");
183     mRenderPosition = 0;
184     mFramesPresented = 0;
185     Mutex::Autolock _l(mPresentationLock);
186     mLastPresentationPosition = 0;
187     mLastPresentationValid = false;
188     return NO_ERROR;
189 }
190 
191 void AudioStreamOut::updateInputNums()
192 {
193     assert(mLocalClock.initCheck());
194 
195     mInputChanCount = audio_channel_count_from_out_mask(mInputChanMask);
196 
197     // 512 is good for AC3 and DTS passthrough.
198     mInputChunkFrames = 512 * ((outputSampleRate() + 48000 - 1) / 48000);
199 
200     ALOGV("updateInputNums: chunk size %u from output rate %u\n",
201         mInputChunkFrames, outputSampleRate());
202 
203     mInputFrameSize = mInputChanCount * audio_bytes_per_sample(mInputFormat);
204 
205     // Buffer size is just the frame size multiplied by the number of
206     // frames per chunk.
207     mInputBufSize = mInputChunkFrames * mInputFrameSize;
208 
209     // The nominal latency is just the duration of a chunk * the number of
210     // chunks we nominally keep in flight at any given point in time.
211     mInputNominalLatencyUSec = static_cast<uint32_t>(((
212                     static_cast<uint64_t>(mInputChunkFrames)
213                     * 1000000 * mInputNominalChunksInFlight)
214                     / mInputSampleRate));
215 
216     memset(&mLocalTimeToFrames, 0, sizeof(mLocalTimeToFrames));
217     mLocalTimeToFrames.a_to_b_numer = mInputSampleRate;
218     mLocalTimeToFrames.a_to_b_denom = mLocalClock.getLocalFreq();
219     LinearTransform::reduce(
220             &mLocalTimeToFrames.a_to_b_numer,
221             &mLocalTimeToFrames.a_to_b_denom);
222 }
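// A rough worked example of the math above, assuming the stereo defaults
// (48 kHz, 16-bit PCM, 4 chunks nominally in flight):
//   mInputChunkFrames        = 512 * ((48000 + 47999) / 48000)  = 512 frames
//   mInputFrameSize          = 2 channels * 2 bytes per sample  = 4 bytes
//   mInputBufSize            = 512 * 4                          = 2048 bytes
//   mInputNominalLatencyUSec = 512 * 1000000 * 4 / 48000        = 42666 usec
// At a 96 kHz rate the chunk doubles to 1024 frames, so the nominal latency
// stays at roughly the same 42.7 msec.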
223 
224 void AudioStreamOut::finishedWriteOp(size_t framesWritten,
225                                      bool needThrottle)
226 {
227     assert(mLocalClock.initCheck());
228 
229     int64_t now = mLocalClock.getLocalTime();
230 
231     if (!mThrottleValid || !needThrottle) {
232         mThrottleValid = true;
233         mWriteStartLT  = now;
234         mFramesWritten = 0;
235     }
236 
237     mFramesWritten += framesWritten;
238     mFramesPresented += framesWritten;
239     mRenderPosition += framesWritten;
240 
241     if (needThrottle) {
242         int64_t deltaLT;
243         mLocalTimeToFrames.doReverseTransform(mFramesWritten, &deltaLT);
244         deltaLT += mWriteStartLT;
245         deltaLT -= now;
246 
247         int64_t deltaUSec;
248         mUSecToLocalTime.doReverseTransform(deltaLT, &deltaUSec);
249 
250         if (deltaUSec > 0) {
251             useconds_t sleep_time;
252 
253             // We should never be a full second ahead of schedule; sanity check
254             // our throttle time and cap the max sleep time at 1 second.
255             if (deltaUSec > 1000000) {
256                 ALOGW("throttle time clipped! deltaLT = %" PRIi64 " deltaUSec = %" PRIi64,
257                     deltaLT, deltaUSec);
258                 sleep_time = 1000000;
259             } else {
260                 sleep_time = static_cast<useconds_t>(deltaUSec);
261             }
262             usleep(sleep_time);
263         }
264     }
265 }
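// A sketch of the throttle math above, again assuming a hypothetical 27 MHz
// local clock and the 48 kHz stereo defaults: mLocalTimeToFrames reduces
// 48000/27000000 to 2/1125, so the reverse transform of mFramesWritten =
// 2048 frames is 2048 * 1125 / 2 = 1152000 ticks (~42.7 msec), and the
// reverse of mUSecToLocalTime (27/1) turns the remaining ticks into usec.
// If the writes themselves return almost immediately, the thread therefore
// sleeps until ~42666 usec after mWriteStartLT; with no physical outputs
// attached (needThrottle == true), write() is paced at the stream's
// real-time rate.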
266 
267 static const String8 keyRouting(AudioParameter::keyRouting);
268 static const String8 keySupSampleRates("sup_sampling_rates");
269 static const String8 keySupFormats("sup_formats");
270 static const String8 keySupChannels("sup_channels");
271 status_t AudioStreamOut::setParameters(__unused struct audio_stream *stream, const char *kvpairs)
272 {
273     AudioParameter param = AudioParameter(String8(kvpairs));
274     String8 key = String8(AudioParameter::keyRouting);
275     int tmpInt;
276 
277     if (param.getInt(key, tmpInt) == NO_ERROR) {
278         // The audio HAL handles routing to physical devices entirely
279     // internally and mostly ignores what audio flinger tells it to do.  Just in case
280         // there is something (now or in the future) in audio flinger which
281         // cares about the routing value in a call to getParameters, we hang on
282         // to the last routing value set by audio flinger so we can at least be
283         // consistent when we lie to the upper levels about doing what they told
284         // us to do.
285         mAudioFlingerTgtDevices = static_cast<uint32_t>(tmpInt);
286     }
287 
288     return NO_ERROR;
289 }
290 
291 char* AudioStreamOut::getParameters(const char* k)
292 {
293     AudioParameter param = AudioParameter(String8(k));
294     String8 value;
295 
296     if (param.get(keyRouting, value) == NO_ERROR) {
297         param.addInt(keyRouting, (int)mAudioFlingerTgtDevices);
298     }
299 
300     HDMIAudioCaps& hdmiCaps = mOwnerHAL.getHDMIAudioCaps();
301 
302     if (param.get(keySupSampleRates, value) == NO_ERROR) {
303         if (mIsMCOutput) {
304             hdmiCaps.getRatesForAF(value);
305             param.add(keySupSampleRates, value);
306         } else {
307             param.add(keySupSampleRates, String8("48000"));
308         }
309     }
310 
311     if (param.get(keySupFormats, value) == NO_ERROR) {
312         if (mIsMCOutput) {
313             hdmiCaps.getFmtsForAF(value);
314             param.add(keySupFormats, value);
315         } else {
316             param.add(keySupFormats, String8("AUDIO_FORMAT_PCM_16_BIT|AUDIO_FORMAT_PCM_8_24_BIT"));
317         }
318     }
319 
320     if (param.get(keySupChannels, value) == NO_ERROR) {
321         if (mIsMCOutput) {
322             hdmiCaps.getChannelMasksForAF(value);
323             param.add(keySupChannels, value);
324         } else {
325             param.add(keySupChannels, String8("AUDIO_CHANNEL_OUT_STEREO"));
326         }
327     }
328 
329     return strdup(param.toString().string());
330 }
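// As a concrete illustration of the keys handled above: a query for
// "sup_sampling_rates" on the primary (stereo) stream comes back as the
// string "sup_sampling_rates=48000", while on the multi-channel stream the
// value is whatever rate list HDMIAudioCaps::getRatesForAF() builds from
// the connected sink's capabilities.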
331 
332 uint32_t AudioStreamOut::outputSampleRate() const
333 {
334     return mInputSampleRate;
335 }
336 
337 uint32_t AudioStreamOut::latency() const {
338     uint32_t uSecLatency = mInputNominalLatencyUSec;
339     uint32_t vcompDelay = mOwnerHAL.getVideoDelayCompUsec();
340 
341     if (uSecLatency < vcompDelay)
342         return 0;
343 
344     return ((uSecLatency - vcompDelay) / 1000);
345 }
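// For example, with the nominal ~42666 usec computed in updateInputNums()
// and a hypothetical video delay compensation of 10000 usec, the reported
// latency would be (42666 - 10000) / 1000 = 32 msec; with no video delay
// compensation it is 42 msec.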
346 
347 // Used to implement get_presentation_position() for Audio HAL.
348 // According to the prototype in audio.h, the frame count should not get
349 // reset on standby().
350 status_t AudioStreamOut::getPresentationPosition(uint64_t *frames,
351         struct timespec *timestamp)
352 {
353     status_t result = -ENODEV;
354     // If we cannot get a lock then try to return a cached position and timestamp.
355     // It is better to return an old timestamp than to wait for a fresh one.
356     if (mRoutingLock.tryLock() != OK) {
357         // We failed to get the lock. It is probably held by a blocked write().
358         if (mLastPresentationValid) {
359             // Use cached position.
360             // Use mutex because this cluster of variables may be getting
361             // updated by the write thread.
362             Mutex::Autolock _l(mPresentationLock);
363             *frames = mLastPresentationPosition;
364             *timestamp = mLastPresentationTime;
365             result = NO_ERROR;
366         }
367         return result;
368     }
369 
370     // Lock succeeded so it is safe to call this.
371     result = getPresentationPosition_l(frames, timestamp);
372 
373     mRoutingLock.unlock();
374     return result;
375 }
376 
377 // Used to implement get_presentation_position() for Audio HAL.
378 // According to the prototype in audio.h, the frame count should not get
379 // reset on standby().
380 // mRoutingLock should be locked before calling this method.
381 status_t AudioStreamOut::getPresentationPosition_l(uint64_t *frames,
382         struct timespec *timestamp)
383 {
384     status_t result = -ENODEV;
385     // The presentation timestamp should be the same for all devices.
386     // Also Molly only has one output device at the moment.
387     // So just use the first one in the list.
388     if (!mPhysOutputs.isEmpty()) {
389         unsigned int avail = 0;
390         sp<AudioOutput> audioOutput = mPhysOutputs.itemAt(0);
391         if (audioOutput->getHardwareTimestamp(&avail, timestamp) == OK) {
392 
393             int64_t framesInDriverBuffer = (int64_t)audioOutput->getKernelBufferSize() - (int64_t)avail;
394             if (framesInDriverBuffer >= 0) {
395                 // FIXME av sync fudge factor
396                 // Use a fudge factor to account for hidden buffering in the
397                 // HDMI output path. This is a hack until we can determine the
398                 // actual buffer sizes.
399                 // Increasing kFudgeMSec will move the audio earlier in
400                 // relation to the video.
401                 const int kFudgeMSec = 50;
402                 int fudgeFrames = kFudgeMSec * sampleRate() / 1000;
403                 int64_t pendingFrames = framesInDriverBuffer + fudgeFrames;
404 
405                 int64_t signedFrames = mFramesPresented - pendingFrames;
406                 if (signedFrames < 0) {
407                     ALOGV("getPresentationPosition: playing silent preroll"
408                             ", mFramesPresented = %" PRIu64 ", pendingFrames = %" PRIi64,
409                             mFramesPresented, pendingFrames);
410                 } else {
411     #if HAL_PRINT_TIMESTAMP_CSV
412                     // Print comma separated values for spreadsheet analysis.
413                     uint64_t nanos = (((uint64_t)timestamp->tv_sec) * 1000000000L)
414                             + timestamp->tv_nsec;
415                     ALOGI("getPresentationPosition, %" PRIu64 ", %4u, %" PRIi64 ", %" PRIu64,
416                             mFramesPresented, avail, signedFrames, nanos);
417     #endif
418                     uint64_t unsignedFrames = (uint64_t) signedFrames;
419 
420                     {
421                         Mutex::Autolock _l(mPresentationLock);
422                         // Check for retrograde timestamps.
423                         if (unsignedFrames < mLastPresentationPosition) {
424                             ALOGW("getPresentationPosition: RETROGRADE timestamp, diff = %" PRId64,
425                                 (int64_t)(unsignedFrames - mLastPresentationPosition));
426                             if (mLastPresentationValid) {
427                                 // Use previous presentation position and time.
428                                 *timestamp = mLastPresentationTime;
429                                 *frames = mLastPresentationPosition;
430                                 result = NO_ERROR;
431                             }
432                             // else return error
433                         } else {
434                             *frames = unsignedFrames;
435                             // Save cached data that we can use when the HAL is locked.
436                             mLastPresentationPosition = unsignedFrames;
437                             mLastPresentationTime = *timestamp;
438                             result = NO_ERROR;
439                         }
440                     }
441                 }
442             } else {
443                 ALOGE("getPresentationPosition: avail too large = %u", avail);
444             }
445             mReportedAvailFail = false;
446         } else {
447             ALOGW_IF(!mReportedAvailFail,
448                     "getPresentationPosition: getHardwareTimestamp returned non-zero");
449             mReportedAvailFail = true;
450         }
451     } else {
452         ALOGVV("getPresentationPosition: no physical outputs! This HAL is inactive!");
453     }
454     mLastPresentationValid = result == NO_ERROR;
455     return result;
456 }
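// A rough worked example of the position math above, assuming a hypothetical
// 2048-frame kernel buffer, avail = 1024 and a 48 kHz stream:
//   framesInDriverBuffer = 2048 - 1024       = 1024 frames
//   fudgeFrames          = 50 * 48000 / 1000 = 2400 frames
//   pendingFrames        = 1024 + 2400       = 3424 frames
// so a stream that has had 100000 frames written reports a presentation
// position of 100000 - 3424 = 96576 frames at the returned hardware
// timestamp.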
457 
458 status_t AudioStreamOut::getRenderPosition(__unused uint32_t *dspFrames)
459 {
460     if (dspFrames == NULL) {
461         return -EINVAL;
462     }
463     *dspFrames = (uint32_t) mRenderPosition;
464     return NO_ERROR;
465 }
466 
467 void AudioStreamOut::updateTargetOutputs()
468 {
469     Mutex::Autolock _l(mRoutingLock);
470     AudioOutputList::iterator I;
471     uint32_t cur_outputs = 0;
472 
473     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
474         cur_outputs |= (*I)->devMask();
475 
476     if (cur_outputs == mTgtDevices)
477         return;
478 
479     uint32_t outputsToObtain  = mTgtDevices & ~cur_outputs;
480     uint32_t outputsToRelease = cur_outputs & ~mTgtDevices;
481 
482     // Start by releasing any outputs we should no longer have back to the HAL.
483     if (outputsToRelease) {
484 
485         I = mPhysOutputs.begin();
486         while (I != mPhysOutputs.end()) {
487             if (!(outputsToRelease & (*I)->devMask())) {
488                 ++I;
489                 continue;
490             }
491 
492             outputsToRelease &= ~((*I)->devMask());
493             mOwnerHAL.releaseOutput(*this, *I);
494             I = mPhysOutputs.erase(I);
495         }
496     }
497 
498     if (outputsToRelease) {
499         ALOGW("Bookkeeping error!  Still have outputs to release (%08x), but"
500               " none of them appear to be in the mPhysOutputs list!",
501               outputsToRelease);
502     }
503 
504     // Now attempt to obtain any outputs we should be using, but are not
505     // currently.
506     if (outputsToObtain) {
507         uint32_t mask;
508 
509         // Buffer configuration may need updating now that we have decoded
510     // the start of a stream. For example, EAC3 needs 4X sampleRate.
511         updateInputNums();
512 
513         for (mask = 0x1; outputsToObtain; mask <<= 1) {
514             if (!(mask & outputsToObtain))
515                 continue;
516 
517             sp<AudioOutput> newOutput;
518             status_t res;
519 
520             res = mOwnerHAL.obtainOutput(*this, mask, &newOutput);
521             outputsToObtain &= ~mask;
522 
523             if (OK != res) {
524                 // If we get an error back from obtain output, it means that
525                 // something went really wrong at a lower level (probably failed
526                 // to open the driver).  We should not try to obtain this output
527                 // again, at least until the next routing change.
528                 ALOGW("Failed to obtain output %08x for %s audio stream out."
529                       " (res %d)", mask, getName(), res);
530                 mTgtDevices &= ~mask;
531                 continue;
532             }
533 
534             if (newOutput != NULL) {
535                 // If we actually got an output, go ahead and add it to our list
536                 // of physical outputs.  The rest of the system will handle
537                 // starting it up.  If we didn't get an output, but also got no
538                 // error code, it just means that the output is currently busy
539                 // and should become available soon.
540                 ALOGI("updateTargetOutputs: adding output back to mPhysOutputs");
541                 mPhysOutputs.push_back(newOutput);
542             }
543         }
544     }
545 }
546 
547 void AudioStreamOut::adjustOutputs(int64_t maxTime)
548 {
549     int64_t a_zero_original = mLocalTimeToFrames.a_zero;
550     int64_t b_zero_original = mLocalTimeToFrames.b_zero;
551     AudioOutputList::iterator I;
552 
553     // Check to see if any outputs are active and see what their buffer levels
554     // are.
555     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
556         if ((*I)->getState() == AudioOutput::DMA_START) {
557             int64_t lastWriteTS = (*I)->getLastNextWriteTS();
558             int64_t padAmt;
559 
560             mLocalTimeToFrames.a_zero = lastWriteTS;
561             mLocalTimeToFrames.b_zero = 0;
562             if (mLocalTimeToFrames.doForwardTransform(maxTime,
563                                                       &padAmt)) {
564                 (*I)->adjustDelay(((int32_t)padAmt));
565             }
566         }
567     }
568     // Restore original offset so that the sleep time calculation for
569     // throttling is not broken in finishedWriteOp().
570     mLocalTimeToFrames.a_zero = a_zero_original;
571     mLocalTimeToFrames.b_zero = b_zero_original;
572 }
573 
574 ssize_t AudioStreamOut::write(const void* buffer, size_t bytes)
575 {
576     uint8_t *data = (uint8_t *)buffer;
577     ALOGVV("AudioStreamOut::write_l(%u) 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
578           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
579           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
580           " 0x%02X, 0x%02X, 0x%02X, 0x%02X",
581         bytes, data[0], data[1], data[2], data[3],
582         data[4], data[5], data[6], data[7],
583         data[8], data[9], data[10], data[11],
584         data[12], data[13], data[14], data[15]
585         );
586 
587     //
588     // Note that only calls to write change the contents of the mPhysOutputs
589     // collection (during the call to updateTargetOutputs).  updateTargetOutputs
590     // will hold the routing lock during the operation, as should any reader of
591     // mPhysOutputs, unless the reader is a call to write or
592     // getNextWriteTimestamp (we know that it is safe for write and gnwt to read
593     // the collection because the only collection mutator is the same thread
594     // which calls write and gnwt).
595 
596     // If the stream is in standby, then the first write should bring it out
597     // of standby
598     if (mInStandby) {
599         mOwnerHAL.standbyStatusUpdate(false, mIsMCOutput);
600         mInStandby = false;
601     }
602 
603     updateTargetOutputs(); // locks mRoutingLock
604 
605     // If any of our outputs is in the PRIMED state when ::write is called, it
606     // means one of two things.  First, it could be that the DMA output really
607     // has not started yet.  This is odd, but certainly not impossible.  The
608     // other possibility is that AudioFlinger is in its silence-pushing mode and
609     // is not calling getNextWriteTimestamp.  After an output is primed, it is
610     // in GNWTS that the amount of padding to compensate for different DMA start
611     // times is taken into account.  Go ahead and force a call to GNWTS, just to
612     // be certain that we have checked recently and are not stuck in silence
613     // fill mode.  Failure to do this will cause the AudioOutput state machine
614     // to eventually give up on DMA starting and reset the output over and over
615     // again (spamming the log and producing general confusion).
616     //
617     // While we are in the process of checking our various output states, check
618     // to see if any outputs have made it to the ACTIVE state.  Pass this
619     // information along to the call to processOneChunk.  If any of our outputs
620     // are waiting to be primed while other outputs have made it to steady
621     // state, we need to change our priming behavior slightly.  Instead of
622     // filling an output's buffer completely, we want to fill it to slightly
623     // less than full and let the adjustDelay mechanism take care of the rest.
624     //
625     // Failure to do this during steady state operation will almost certainly
626     // lead to the new output being over-filled relative to the other outputs
627     // causing it to be slightly out of sync.
628     AudioOutputList::iterator I;
629     bool checkDMAStart = false;
630     bool hasActiveOutputs = false;
631     {
632         Mutex::Autolock _l(mRoutingLock);
633         for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
634             if (AudioOutput::PRIMED == (*I)->getState())
635                 checkDMAStart = true;
636 
637             if ((*I)->getState() == AudioOutput::ACTIVE)
638                 hasActiveOutputs = true;
639         }
640     }
641     if (checkDMAStart) {
642         int64_t junk;
643         getNextWriteTimestamp_internal(&junk);
644     }
645 
646     // We always call processOneChunk on the outputs, as it is the
647     // tick for their state machines.
648     {
649         Mutex::Autolock _l(mRoutingLock);
650         for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
651             (*I)->processOneChunk((uint8_t *)buffer, bytes, hasActiveOutputs, mInputFormat);
652         }
653 
654         // If we don't actually have any physical outputs to write to, just sleep
655         // for the proper amount of time in order to simulate the throttle that writing
656         // to the hardware would impose.
657         uint32_t framesWritten = bytes / mInputFrameSize;
658         finishedWriteOp(framesWritten, (0 == mPhysOutputs.size()));
659     }
660 
661     // Load presentation position cache because we will normally be locked when it is called.
662     {
663         Mutex::Autolock _l(mRoutingLock);
664         uint64_t frames;
665         struct timespec timestamp;
666         getPresentationPosition_l(&frames, &timestamp);
667     }
668     return static_cast<ssize_t>(bytes);
669 }
670 
671 status_t AudioStreamOut::getNextWriteTimestamp(int64_t *timestamp)
672 {
673     return getNextWriteTimestamp_internal(timestamp);
674 }
675 
676 status_t AudioStreamOut::getNextWriteTimestamp_internal(
677         int64_t *timestamp)
678 {
679     int64_t max_time = LLONG_MIN;
680     bool    max_time_valid = false;
681     bool    need_adjust = false;
682 
683     // Across all of our physical outputs, figure out the max time when
684     // a write operation will hit the speakers.  Assume that if an
685     // output cannot answer the question, its because it has never
686     // started or because it has recently underflowed and needs to be
687     // restarted.  If this is the case, we will need to prime the
688     // pipeline with a chunk's worth of data before proceeding.
689     // If any of the outputs indicate a discontinuity (meaning that the
690     // DMA start time was valid and is now invalid, or was and is valid
691     // but was different from before; almost certainly caused by a low
692     // level underflow), then just stop now.  We will need to reset and
693     // re-prime all of the outputs in order to make certain that the
694     // lead-times on all of the outputs match.
695 
696     AudioOutputList::iterator I;
697     bool discon = false;
698 
699     // Find the largest next write timestamp. The goal is to make EVERY
700     // output have the same value, but we also need this to pass back
701     // up the layers.
702     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
703         int64_t tmp;
704         if (OK == (*I)->getNextWriteTimestamp(&tmp, &discon)) {
705             if (!max_time_valid || (max_time < tmp)) {
706                 max_time = tmp;
707                 max_time_valid = true;
708             }
709         }
710     }
711 
712     // Check the state of each output and determine if we need to align them.
713     // Make sure to do this after we have called each outputs'
714     // getNextWriteTimestamp as the transition from PRIMED to DMA_START happens
715     // there.
716     for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
717         if ((*I)->getState() == AudioOutput::DMA_START) {
718             need_adjust = true;
719             break;
720         }
721     }
722 
723     // At this point, if we still have not found at least one output
724     // who knows when their data is going to hit the speakers, then we
725     // just can't answer the getNextWriteTimestamp question and we
726     // should give up.
727     if (!max_time_valid) {
728         return INVALID_OPERATION;
729     }
730 
731     // Stuff silence into the non-aligned outputs so that the effective
732     // timestamp is the same for all the outputs.
733     if (need_adjust)
734         adjustOutputs(max_time);
735 
736     // We are done. The time at which the next written audio should
737     // hit the speakers is just max_time plus the maximum amt of delay
738     // compensation in the system.
739     *timestamp = max_time;
740     return OK;
741 }
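// As an illustration of the alignment step: if output A reports that its
// next write will hit the speakers at local-clock tick tA and output B
// reports a later tick tB, then max_time = tB and adjustOutputs() pads
// output A with roughly (tB - tA) * sampleRate / localClockFreq frames of
// silence (via the shifted mLocalTimeToFrames transform) so that audio
// written from this point on lines up on both outputs.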
742 
743 #define DUMP(a...) \
744     snprintf(buffer, SIZE, a); \
745     buffer[SIZE - 1] = 0; \
746     result.append(buffer);
747 #define B2STR(b) b ? "true" : "false"
748 
749 status_t AudioStreamOut::dump(int fd)
750 {
751     const size_t SIZE = 256;
752     char buffer[SIZE];
753     String8 result;
754     DUMP("\n%s AudioStreamOut::dump\n", getName());
755     DUMP("\tsample rate            : %d\n", sampleRate());
756     DUMP("\tbuffer size            : %d\n", bufferSize());
757     DUMP("\tchannel mask           : 0x%04x\n", chanMask());
758     DUMP("\tformat                 : %d\n", format());
759     DUMP("\tdevice mask            : 0x%04x\n", mTgtDevices);
760     DUMP("\tIn standby             : %s\n", mInStandby? "yes" : "no");
761 
762     mRoutingLock.lock();
763     AudioOutputList outSnapshot(mPhysOutputs);
764     mRoutingLock.unlock();
765 
766     AudioOutputList::iterator I;
767     for (I = outSnapshot.begin(); I != outSnapshot.end(); ++I)
768         (*I)->dump(result);
769 
770     ::write(fd, result.string(), result.size());
771 
772     return NO_ERROR;
773 }
774 
775 #undef B2STR
776 #undef DUMP
777 
778 }  // android
779