/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

// We define LOG_TAG after the #includes because, if a header used ALOG,
// it would fail on the reference to mInService.
#undef LOG_TAG
// This file is used in both the client and server processes.
// Using distinct tags makes the logs easier to interpret.
#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
                            : "AudioStreamInternalPlay_Client")

using android::status_t;
using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

constexpr int kRampMSec = 10; // time to apply a change in volume

aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
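        // Configure the conversion path between the app data and the device data:
        // sample format, channel count, optional mono blend, balance, and volume ramps.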
        result = mFlowGraph.configure(getFormat(),
                                      getSamplesPerFrame(),
                                      getDeviceFormat(),
                                      getDeviceChannelCount(),
                                      getRequireMonoBlend(),
                                      getAudioBalance(),
                                      (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));

        if (result != AAUDIO_OK) {
            safeReleaseClose();
        }
        // Sample rate is constrained to common values by now and should not overflow.
        int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
        mFlowGraph.setRampLengthInFrames(numFrames);
    }
    return result;
}

// This must be called under mStreamLock.
aaudio_result_t AudioStreamInternalPlay::requestPause_l()
{
    aaudio_result_t result = stopCallback_l();
    if (result != AAUDIO_OK) {
        return result;
    }
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

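    // Stop the timing model so it no longer extrapolates the read position while paused.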
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicInternalTimestamp.clear();
    return mServiceInterface.pauseStream(mServiceStreamHandleInfo);
}

aaudio_result_t AudioStreamInternalPlay::requestFlush_l() {
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandleInfo);
}

void AudioStreamInternalPlay::prepareBuffersForStart() {
    // Prevent stale data from being played.
    mAudioEndpoint->eraseDataMemory();
}

void AudioStreamInternalPlay::advanceClientToMatchServerPosition(int32_t serverMargin) {
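    // Move the client write position so it matches the server read position, plus an
    // optional margin, and record the jump so positions reported to the app stay monotonic.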
    int64_t readCounter = mAudioEndpoint->getDataReadCounter() + serverMargin;
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint->setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition(0 /*serverMargin*/);
}

// Write the data, blocking if needed and timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds) {
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime, int64_t *wakeTimePtr) {
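    // Handle any pending messages from the service (timestamps, state changes)
    // before writing new data.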
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint->isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint->setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        // We add a one burst margin in case the DSP advances before we can write the data.
        // This can help prevent the beginning of the stream from being skipped.
        advanceClientToMatchServerPosition(getFramesPerBurst());
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning() && mAudioEndpoint->getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //      numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint->getFullFramesAvailable() >= getBufferSize())) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // Calculate when there will be room available to write to the buffer.
                // If the appBufferSize is smaller than the endpointBufferSize then
                // we will have room to write data beyond the appBufferSize.
                // That is a technique used to reduce glitches without adding latency.
                const int32_t appBufferSize = getBufferSize();
                // The endpoint buffer size is set to the maximum that can be written.
                // If we use it then we must carve out some room to write data when we wake up.
                const int32_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
                        - getFramesPerBurst();
                const int32_t bestBufferSize = std::min(appBufferSize, endBufferSize);
                int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
                wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}


aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

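    // Get the writable region of the shared circular buffer. Because the region may
    // wrap around the end of the buffer, it is described as up to two contiguous parts.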
    mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }

            int32_t numBytes = getBytesPerFrame() * framesToWrite;

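            // Let the flow graph convert the app data to the device format,
            // applying volume, as it copies into the shared buffer.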
            mFlowGraph.process((void *)byteBuffer,
                               wrappingBuffer.data[partIndex],
                               framesToWrite);

            byteBuffer += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint->advanceWriteIndex(framesWritten);

    return framesWritten;
}

int64_t AudioStreamInternalPlay::getFramesRead() {
    if (mAudioEndpoint) {
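        // If the timing model is in control, extrapolate the current read position
        // from it; otherwise read the shared counter directly.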
        const int64_t framesReadHardware = isClockModelInControl()
                ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
                : mAudioEndpoint->getDataReadCounter();
        // Add service offset and prevent retrograde motion.
        mLastFramesRead = std::max(mLastFramesRead, framesReadHardware + mFramesOffsetFromService);
    }
    return mLastFramesRead;
}

int64_t AudioStreamInternalPlay::getFramesWritten() {
    if (mAudioEndpoint) {
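        // Report the write position relative to the app, including the offset
        // accumulated from flushes and server catch-ups.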
        mLastFramesWritten = mAudioEndpoint->getDataWriteCounter()
                + mFramesOffsetFromService;
    }
    return mLastFramesWritten;
}


// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;
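    // Use a blocking-write timeout sized for one callback buffer of frames.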
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                maybeCallErrorCallback(result);
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
          __func__, result, (int) isActive());
    return nullptr;
}

//------------------------------------------------------------------------------
// Implementation of PlayerBase
status_t AudioStreamInternalPlay::doSetVolume() {
    float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
    ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
          __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
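    // The flow graph ramps to the new target volume over kRampMSec to avoid clicks.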
    mFlowGraph.setTargetVolume(combinedVolume);
    return android::NO_ERROR;
}