1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 //#define LOG_NDEBUG 0
18 #include <utils/Log.h>
19
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21
22 #include <algorithm>
23
24 #include <media/MediaMetricsItem.h>
25 #include <utils/Trace.h>
26
27 #include "client/AudioStreamInternalPlay.h"
28 #include "utility/AudioClock.h"
29
// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
32 #undef LOG_TAG
33 // This file is used in both client and server processes.
34 // This is needed to make sense of the logs more easily.
35 #define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
36 : "AudioStreamInternalPlay_Client")
37
38 using android::status_t;
39 using android::WrappingBuffer;
40
41 using namespace aaudio;
42
// Construct the playback-side internal stream.
// All real setup is delegated to the AudioStreamInternal base class;
// inService selects client vs. service behavior (and the LOG_TAG above).
AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}
48
49 constexpr int kRampMSec = 10; // time to apply a change in volume
50
open(const AudioStreamBuilder & builder)51 aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
52 aaudio_result_t result = AudioStreamInternal::open(builder);
53 const bool useVolumeRamps = (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE);
54 if (result == AAUDIO_OK) {
55 result = mFlowGraph.configure(getFormat(),
56 getSamplesPerFrame(),
57 getSampleRate(),
58 getDeviceFormat(),
59 getDeviceSamplesPerFrame(),
60 getDeviceSampleRate(),
61 getRequireMonoBlend(),
62 useVolumeRamps,
63 getAudioBalance(),
64 aaudio::resampler::MultiChannelResampler::Quality::Medium);
65
66 if (result != AAUDIO_OK) {
67 safeReleaseClose();
68 }
69 // Sample rate is constrained to common values by now and should not overflow.
70 int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
71 mFlowGraph.setRampLengthInFrames(numFrames);
72 }
73 return result;
74 }
75
76 // This must be called under mStreamLock.
requestPause_l()77 aaudio_result_t AudioStreamInternalPlay::requestPause_l()
78 {
79 aaudio_result_t result = stopCallback_l();
80 if (result != AAUDIO_OK) {
81 return result;
82 }
83 if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
84 ALOGW("%s() mServiceStreamHandle invalid", __func__);
85 return AAUDIO_ERROR_INVALID_STATE;
86 }
87
88 mClockModel.stop(AudioClock::getNanoseconds());
89 setState(AAUDIO_STREAM_STATE_PAUSING);
90 mAtomicInternalTimestamp.clear();
91 return mServiceInterface.pauseStream(mServiceStreamHandleInfo);
92 }
93
requestFlush_l()94 aaudio_result_t AudioStreamInternalPlay::requestFlush_l() {
95 if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
96 ALOGW("%s() mServiceStreamHandle invalid", __func__);
97 return AAUDIO_ERROR_INVALID_STATE;
98 }
99
100 setState(AAUDIO_STREAM_STATE_FLUSHING);
101 return mServiceInterface.flushStream(mServiceStreamHandleInfo);
102 }
103
// Prepare the shared FIFO and flowgraph before the stream (re)starts.
// Order matters: reset ramps, erase the buffer, then re-sync counters.
void AudioStreamInternalPlay::prepareBuffersForStart() {
    // Reset volume ramps to avoid a starting noise.
    // This was called here instead of AudioStreamInternal so that
    // it will be easier to backport.
    mFlowGraph.reset();
    // Prevent stale data from being played.
    mAudioEndpoint->eraseDataMemory();
    // All data has been erased. To avoid mixer for the shared stream use stale
    // counters, which may cause the service side thinking stream starts flowing before
    // the client actually writes data, advance the client to match server position.
    advanceClientToMatchServerPosition(0 /*serverMargin*/);
}
116
// Drain the stream before stopping so that all written data is played.
// Only applies to free-running (MMAP/DSP-read) endpoints; erases memory
// ahead of the reader, then sleeps until the data should have been
// consumed and reached the DAC. Uses the clock model, so the sleep
// durations are estimates and are clamped to sane maximums.
void AudioStreamInternalPlay::prepareBuffersForStop() {
    // If this is a shared stream and the FIFO is being read by the mixer then
    // we don't have to worry about the DSP reading past the valid data. We can skip all this.
    if(!mAudioEndpoint->isFreeRunning()) {
        return;
    }
    // Sleep until the DSP has read all of the data written.
    int64_t validFramesInBuffer =
            mAudioEndpoint->getDataWriteCounter() - mAudioEndpoint->getDataReadCounter();
    if (validFramesInBuffer >= 0) {
        int64_t emptyFramesInBuffer = ((int64_t) getBufferCapacity()) - validFramesInBuffer;

        // Prevent stale data from being played if the DSP is still running.
        // Erase some of the FIFO memory in front of the DSP read cursor.
        // Subtract one burst so we do not accidentally erase data that the DSP might be using.
        int64_t framesToErase = std::max((int64_t) 0,
                                         emptyFramesInBuffer - getFramesPerBurst());
        mAudioEndpoint->eraseEmptyDataMemory(framesToErase);

        // Sleep until we are confident the DSP has consumed all of the valid data.
        // Sleep for one extra burst as a safety margin because the IsochronousClockModel
        // is not perfectly accurate.
        // The ClockModel uses the server frame position so do not use getFramesWritten().
        int64_t positionInEmptyMemory = mAudioEndpoint->getDataWriteCounter() + getFramesPerBurst();
        int64_t timeAllConsumed = mClockModel.convertPositionToTime(positionInEmptyMemory);
        int64_t durationAllConsumed = timeAllConsumed - AudioClock::getNanoseconds();
        // Prevent sleeping for too long.
        durationAllConsumed = std::min(200 * AAUDIO_NANOS_PER_MILLISECOND, durationAllConsumed);
        AudioClock::sleepForNanos(durationAllConsumed);
    }

    // Erase all of the memory in case the DSP keeps going and wraps around.
    mAudioEndpoint->eraseDataMemory();

    // Wait for the last buffer to reach the DAC.
    // This is because the expected behavior of stop() is that all data written to the stream
    // should be played before the hardware actually shuts down.
    // This is different than pause(), where we just end as soon as possible.
    // This can be important when, for example, playing car navigation and
    // you want the user to hear the complete instruction.
    if (mAtomicInternalTimestamp.isValid()) {
        // Use timestamps to calculate the latency between the DSP reading
        // a frame and when it reaches the DAC.
        // This code assumes that timestamps are accurate.
        Timestamp timestamp = mAtomicInternalTimestamp.read();
        int64_t dacPosition = timestamp.getPosition();
        int64_t hardwareReadTime = mClockModel.convertPositionToTime(dacPosition);
        int64_t hardwareLatencyNanos = timestamp.getNanoseconds() - hardwareReadTime;
        ALOGD("%s() hardwareLatencyNanos = %lld", __func__,
              (long long) hardwareLatencyNanos);
        // Prevent sleeping for too long.
        hardwareLatencyNanos = std::min(30 * AAUDIO_NANOS_PER_MILLISECOND,
                                        hardwareLatencyNanos);
        AudioClock::sleepForNanos(hardwareLatencyNanos);
    }
}
173
advanceClientToMatchServerPosition(int32_t serverMargin)174 void AudioStreamInternalPlay::advanceClientToMatchServerPosition(int32_t serverMargin) {
175 int64_t readCounter = mAudioEndpoint->getDataReadCounter() + serverMargin;
176 int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();
177
178 // Bump offset so caller does not see the retrograde motion in getFramesRead().
179 int64_t offset = writeCounter - readCounter;
180 mFramesOffsetFromService += offset;
181 ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
182 (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
183
184 // Force writeCounter to match readCounter.
185 // This is because we cannot change the read counter in the hardware.
186 mAudioEndpoint->setDataWriteCounter(readCounter);
187 }
188
onFlushFromServer()189 void AudioStreamInternalPlay::onFlushFromServer() {
190 advanceClientToMatchServerPosition(0 /*serverMargin*/);
191 }
192
193 // Write the data, block if needed and timeoutMillis > 0
write(const void * buffer,int32_t numFrames,int64_t timeoutNanoseconds)194 aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
195 int64_t timeoutNanoseconds) {
196 return processData((void *)buffer, numFrames, timeoutNanoseconds);
197 }
198
// Write as much data as we can without blocking.
// Returns the number of frames written (possibly 0) or a negative AAudio
// error. If wakeTimePtr is non-null it may be updated with a suggested
// time for the caller to try again (e.g. when waiting for timestamps or
// when the buffer is full).
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Service commands (state changes, disconnects) are handled first.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint->isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint->setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        // We add a one burst margin in case the DSP advances before we can write the data.
        // This can help prevent the beginning of the stream from being skipped.
        advanceClientToMatchServerPosition(getFramesPerBurst());
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning() && mAudioEndpoint->getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint->getFullFramesAvailable() >= getDeviceBufferSize())) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // Calculate when there will be room available to write to the buffer.
                // If the appBufferSize is smaller than the endpointBufferSize then
                // we will have room to write data beyond the appBufferSize.
                // That is a technique used to reduce glitches without adding latency.
                const int64_t appBufferSize = getDeviceBufferSize();
                // The endpoint buffer size is set to the maximum that can be written.
                // If we use it then we must carve out some room to write data when we wake up.
                const int64_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
                        - getDeviceFramesPerBurst();
                const int64_t bestBufferSize = std::min(appBufferSize, endBufferSize);
                int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
                wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}
300
301
// Push app-format frames through the flowgraph (format/rate conversion,
// volume) into the endpoint FIFO, which is exposed as up to two wrapped
// memory regions. Returns the number of app frames consumed from buffer.
// Note the two frame counts differ: frames consumed from the app buffer
// (getBytesPerFrame) vs. frames produced into the FIFO
// (getBytesPerDeviceFrame); the FIFO write index is advanced by the latter.
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeftInByteBuffer = numFrames;

    mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    int framesWrittenToAudioEndpoint = 0;
    while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesAvailableInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
        uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];

        if (framesAvailableInWrappingBuffer > 0) {
            // Pull data from the flowgraph in case there is residual data.
            const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.pull(
                    (void*) currentWrappingBuffer,
                    framesAvailableInWrappingBuffer);

            const int32_t numBytesActuallyWrittenToWrappingBuffer =
                    framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
            currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
            framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
        } else {
            break;
        }

        // Put data from byteBuffer into the flowgraph one buffer (8 frames) at a time.
        // Continuously pull as much data as possible from the flowgraph into the wrapping buffer.
        // The return value of mFlowGraph.process is the number of frames actually pulled.
        while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
            int32_t framesToWriteFromByteBuffer = std::min(flowgraph::kDefaultBufferSize,
                    framesLeftInByteBuffer);
            // If the wrapping buffer is running low, write one frame at a time.
            if (framesAvailableInWrappingBuffer < flowgraph::kDefaultBufferSize) {
                framesToWriteFromByteBuffer = 1;
            }

            const int32_t numBytesToWriteFromByteBuffer = getBytesPerFrame() *
                    framesToWriteFromByteBuffer;

            //ALOGD("%s() framesLeftInByteBuffer %d, framesAvailableInWrappingBuffer %d"
            //      "framesToWriteFromByteBuffer %d, numBytesToWriteFromByteBuffer %d"
            //      , __func__, framesLeftInByteBuffer, framesAvailableInWrappingBuffer,
            //      framesToWriteFromByteBuffer, numBytesToWriteFromByteBuffer);

            const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.process(
                    (void *)byteBuffer,
                    framesToWriteFromByteBuffer,
                    (void *)currentWrappingBuffer,
                    framesAvailableInWrappingBuffer);

            byteBuffer += numBytesToWriteFromByteBuffer;
            framesLeftInByteBuffer -= framesToWriteFromByteBuffer;
            const int32_t numBytesActuallyWrittenToWrappingBuffer =
                    framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
            currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
            framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;

            //ALOGD("%s() numBytesActuallyWrittenToWrappingBuffer %d, framesLeftInByteBuffer %d"
            //      "framesActuallyWrittenToWrappingBuffer %d, numBytesToWriteFromByteBuffer %d"
            //      "framesWrittenToAudioEndpoint %d"
            //      , __func__, numBytesActuallyWrittenToWrappingBuffer, framesLeftInByteBuffer,
            //      framesActuallyWrittenToWrappingBuffer, numBytesToWriteFromByteBuffer,
            //      framesWrittenToAudioEndpoint);
        }
        partIndex++;
    }
    //ALOGD("%s() framesWrittenToAudioEndpoint %d, numFrames %d"
    //      "framesLeftInByteBuffer %d"
    //      , __func__, framesWrittenToAudioEndpoint, numFrames,
    //      framesLeftInByteBuffer);

    // The audio endpoint should reference the number of frames written to the wrapping buffer.
    mAudioEndpoint->advanceWriteIndex(framesWrittenToAudioEndpoint);

    // The internal code should use the number of frames read from the app.
    return numFrames - framesLeftInByteBuffer;
}
385
getFramesRead()386 int64_t AudioStreamInternalPlay::getFramesRead() {
387 if (mAudioEndpoint) {
388 const int64_t framesReadHardware = isClockModelInControl()
389 ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
390 : mAudioEndpoint->getDataReadCounter();
391 // Add service offset and prevent retrograde motion.
392 mLastFramesRead = std::max(mLastFramesRead, framesReadHardware + mFramesOffsetFromService);
393 }
394 return mLastFramesRead;
395 }
396
getFramesWritten()397 int64_t AudioStreamInternalPlay::getFramesWritten() {
398 if (mAudioEndpoint) {
399 mLastFramesWritten = std::max(
400 mLastFramesWritten,
401 mAudioEndpoint->getDataWriteCounter() + mFramesOffsetFromService);
402 }
403 return mLastFramesWritten;
404 }
405
// Render audio in the application callback and then write the data to the stream.
// Runs on the dedicated callback thread; loops until the callback is
// disabled, the stream stops being active, or an error occurs.
void *AudioStreamInternalPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;
    // Timeout scales with the callback burst size.
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        // Write audio data to stream. This is a BLOCKING WRITE!
        // Write data regardless of the callbackResult because we assume the data
        // is valid even when the callback returns AAUDIO_CALLBACK_RESULT_STOP.
        // Imagine a callback that is playing a large sound in memory.
        // When it gets to the end of the sound it can partially fill
        // the last buffer with the end of the sound, then zero pad the buffer, then return STOP.
        // If the callback has no valid data then it should zero-fill the entire buffer.
        result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if ((result != mCallbackFrames)) {
            if (result >= 0) {
                // Only wrote some of the frames requested. The stream can be disconnected
                // or timed out.
                processCommands();
                result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
          __func__, result, (int) isActive());
    return nullptr;
}
449
450 //------------------------------------------------------------------------------
451 // Implementation of PlayerBase
doSetVolume()452 status_t AudioStreamInternalPlay::doSetVolume() {
453 float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
454 ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
455 __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
456 mFlowGraph.setTargetVolume(combinedVolume);
457 return android::NO_ERROR;
458 }
459