/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
#undef LOG_TAG
// This file is used in both client and server processes.
// Using a different tag for each process makes the logs easier to interpret.
#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
                            : "AudioStreamInternalPlay_Client")

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

constexpr int kRampMSec = 10; // time to apply a change in volume
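// For example, at a 48 kHz sample rate a 10 msec ramp corresponds to
// 10 * 48000 / 1000 = 480 frames (see the calculation in open() below).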

aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
        result = mFlowGraph.configure(getFormat(),
                             getSamplesPerFrame(),
                             getDeviceFormat(),
                             getDeviceChannelCount());

        if (result != AAUDIO_OK) {
            releaseCloseFinal();
        }
        // Sample rate is constrained to common values by now and should not overflow.
        int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
        mFlowGraph.setRampLengthInFrames(numFrames);
    }
    return result;
}

// This must be called under mStreamLock.
aaudio_result_t AudioStreamInternalPlay::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicInternalTimestamp.clear();
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint->getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
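    // Adding the difference to mFramesOffsetFromService also keeps getFramesWritten()
    // unchanged when the write counter is rewound to the read counter below.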
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint->setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

// Write the data, blocking if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds) {
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint->isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint->setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
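    // A negative getFullFramesAvailable() means the estimated read position has
    // passed the write position, i.e. frames were consumed that we had not yet written.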
    if (mAudioEndpoint->isFreeRunning() && mAudioEndpoint->getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint->getFullFramesAvailable() >= getBufferSize())) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // Sleep until the readCounter catches up and we only have
                // the getBufferSize() frames of data sitting in the buffer.
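                // For example, with a write counter of 10000 and a 960-frame buffer
                // size, we wake when the clock model predicts the read counter will
                // reach 10000 - 960 = 9040.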
                int64_t nextReadPosition = mAudioEndpoint->getDataWriteCounter() - getBufferSize();
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}


aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
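    // The empty region of the circular FIFO may wrap around the end of the
    // underlying buffer, so WrappingBuffer describes it as up to two contiguous parts.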
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }

            int32_t numBytes = getBytesPerFrame() * framesToWrite;

            mFlowGraph.process((void *)byteBuffer,
                               wrappingBuffer.data[partIndex],
                               framesToWrite);

            byteBuffer += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint->advanceWriteIndex(framesWritten);

    return framesWritten;
}

int64_t AudioStreamInternalPlay::getFramesRead() {
    if (mAudioEndpoint) {
        const int64_t framesReadHardware = isClockModelInControl()
                ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
                : mAudioEndpoint->getDataReadCounter();
        // Add service offset and prevent retrograde motion.
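        // std::max() ensures the value returned to the app never decreases,
        // even if the clock-model estimate briefly runs ahead of the counter.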
        mLastFramesRead = std::max(mLastFramesRead, framesReadHardware + mFramesOffsetFromService);
    }
    return mLastFramesRead;
}

int64_t AudioStreamInternalPlay::getFramesWritten() {
    if (mAudioEndpoint) {
        mLastFramesWritten = mAudioEndpoint->getDataWriteCounter()
                             + mFramesOffsetFromService;
    }
    return mLastFramesWritten;
}


// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
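    // write() returns the number of frames written on success, so any
    // non-negative result keeps the loop running.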
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                maybeCallErrorCallback(result);
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopFromCallback();
            break;
        }
    }

    ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
          __func__, result, (int) isActive());
    return NULL;
}

//------------------------------------------------------------------------------
// Implementation of PlayerBase
status_t AudioStreamInternalPlay::doSetVolume() {
    float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
    ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
          __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
    mFlowGraph.setTargetVolume(combinedVolume);
    return android::NO_ERROR;
}