/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;
AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {
}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

// Write the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
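// Called from the AAudio data-processing loop. Estimates the endpoint's read
// position from the timing model when it is free running, counts underruns,
// writes as many frames as fit in the FIFO, and reports when the caller
// should wake up and try again.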
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }
    // TODO Otherwise query the read counter from the endpoint, since it is set by the actual reader.

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);

    // Calculate an ideal time to wake up.
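    // While OPEN or STARTING, write again immediately to keep priming the buffer.
    // Once STARTED, predict when the endpoint will have read the next burst and
    // wake up then. In other states just poll again after about a millisecond.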
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
            {
                uint32_t burstSize = mFramesPerBurst;
                if (burstSize < 32) {
                    burstSize = 32; // TODO review
                }

                uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}

aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
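    // The endpoint FIFO is circular, so the writable region may wrap around the end
    // of the buffer; WrappingBuffer describes it as up to two contiguous parts.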
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
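            // Four conversion paths: application float or I16 to device float or I16.
            // Same-format paths apply the volume ramp while copying; cross-format
            // paths convert and scale (ramped or at a constant level) in one pass.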
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

int64_t AudioStreamInternalPlay::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
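    // The position is estimated from the clock model, so successive estimates can
    // drift slightly backwards; never report a value smaller than the last one.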
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

int64_t AudioStreamInternalPlay::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
                            + mFramesOffsetFromService;
    ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)framesWritten);
    return framesWritten;
}

// Render audio in the application callback and then write the data to the stream.
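// The loop repeatedly invokes the application's data callback to render audio into
// mCallbackBuffer and then blocks in write() until the frames are accepted. It exits
// when the callback returns STOP, the write fails or times out, or the stream stops.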
void *AudioStreamInternalPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
                if (errorCallback != nullptr) {
                    (*errorCallback)(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}