/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceStreamShared"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <iomanip>
#include <iostream>
#include <mutex>

#include <aaudio/AAudio.h>

#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioEndpointManager.h"
#include "AAudioService.h"
#include "AAudioServiceEndpoint.h"

using namespace android;
using namespace aaudio;

#define MIN_BURSTS_PER_BUFFER       2
#define DEFAULT_BURSTS_PER_BUFFER   16
// This is an arbitrary range. TODO review.
#define MAX_FRAMES_PER_BUFFER       (32 * 1024)
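// With these defaults, calculateBufferCapacity() below sizes the shared buffer to at least
// MIN_BURSTS_PER_BUFFER bursts (DEFAULT_BURSTS_PER_BUFFER when the request is unspecified)
// and never more than MAX_FRAMES_PER_BUFFER frames.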

AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
    : AAudioServiceStreamBase(audioService)
    , mTimestampPositionOffset(0)
    , mXRunCount(0) {
}

std::string AAudioServiceStreamShared::dumpHeader() {
    std::stringstream result;
    result << AAudioServiceStreamBase::dumpHeader();
    result << "    Write#     Read#   Avail   XRuns";
    return result.str();
}

std::string AAudioServiceStreamShared::dump() const NO_THREAD_SAFETY_ANALYSIS {
    std::stringstream result;

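    // Try for up to about one second (50 tries * 20 ms) to take the queue lock so a
    // wedged stream cannot stall a dump indefinitely.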
    const bool isLocked = AAudio_tryUntilTrue(
            [this]()->bool { return audioDataQueueLock.try_lock(); } /* f */,
            50 /* times */,
            20 /* sleepMs */);
    if (!isLocked) {
        result << "AAudioServiceStreamShared may be deadlocked\n";
    }

    result << AAudioServiceStreamBase::dump();

    result << mAudioDataQueue->dump();
    result << std::setw(8) << getXRunCount();

    if (isLocked) {
        audioDataQueueLock.unlock();
    }

    return result.str();
}

int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames,
                                                           int32_t framesPerBurst,
                                                           int32_t requestedSampleRate,
                                                           int32_t deviceSampleRate) {
    if (requestedSampleRate != AAUDIO_UNSPECIFIED && requestedSampleRate != deviceSampleRate) {
        // When sample rate conversion is needed, we use the device sample rate and the
        // requested sample rate to scale the capacity in configureDataInformation().
        // Thus, we should scale the capacity here to cancel out the
        // (requestedSampleRate / deviceSampleRate) scaling there.

        requestedCapacityFrames = static_cast<int64_t>(requestedCapacityFrames) * deviceSampleRate
                                  / requestedSampleRate;
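        // For example, a request for 1920 frames at 48000 Hz on a 44100 Hz endpoint
        // becomes 1920 * 44100 / 48000 = 1764 device frames here.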
        ALOGV("calculateBufferCapacity() scaled buffer capacity to %d frames, requested SR = %d"
              ", device SR = %d",
              requestedCapacityFrames, requestedSampleRate, deviceSampleRate);
    }

    if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
        ALOGE("calculateBufferCapacity() requested capacity %d > max %d",
              requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    // Determine how many bursts will fit in the buffer.
    int32_t numBursts;
    if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) {
        // Use fewer bursts if default is too many.
        if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) {
            numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst;
        } else {
            numBursts = DEFAULT_BURSTS_PER_BUFFER;
        }
    } else {
        // round up to nearest burst boundary
        numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
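        // e.g. a 1000-frame request with 256-frame bursts yields (1000 + 255) / 256 = 4 bursts.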
    }

    // Clip to bare minimum.
    if (numBursts < MIN_BURSTS_PER_BUFFER) {
        numBursts = MIN_BURSTS_PER_BUFFER;
    }
    // Check for numeric overflow.
    if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
        ALOGE("calculateBufferCapacity() overflow, capacity = %d * %d",
              numBursts, framesPerBurst);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
    int32_t capacityInFrames = numBursts * framesPerBurst;

    // Final range check.
    if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
        ALOGE("calculateBufferCapacity() calc capacity %d > max %d",
              capacityInFrames, MAX_FRAMES_PER_BUFFER);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
    ALOGV("calculateBufferCapacity() requested %d frames, actual = %d",
          requestedCapacityFrames, capacityInFrames);
    return capacityInFrames;
}

aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request)  {

    sp<AAudioServiceStreamShared> keep(this);

    if (request.getConstantConfiguration().getSharingMode() != AAUDIO_SHARING_MODE_SHARED) {
        ALOGE("%s() sharingMode mismatch %d", __func__,
              request.getConstantConfiguration().getSharingMode());
        return AAUDIO_ERROR_INTERNAL;
    }

    aaudio_result_t result = AAudioServiceStreamBase::open(request);
    if (result != AAUDIO_OK) {
        return result;
    }

    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();

    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        result = AAUDIO_ERROR_INVALID_STATE;
        goto error;
    }

    // Use the sample rate of the endpoint as each shared stream should use its own SRC.
    setSampleRate(endpoint->getSampleRate());

    // Is the request compatible with the shared endpoint?
    setFormat(configurationInput.getFormat());
    if (getFormat() == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    } else if (getFormat() != AUDIO_FORMAT_PCM_FLOAT) {
        ALOGD("%s() audio_format_t mAudioFormat = %d, need FLOAT", __func__, getFormat());
        result = AAUDIO_ERROR_INVALID_FORMAT;
        goto error;
    }

    setChannelMask(configurationInput.getChannelMask());
    if (getChannelMask() == AAUDIO_UNSPECIFIED) {
        setChannelMask(endpoint->getChannelMask());
    } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
        ALOGD("%s() mSamplesPerFrame = %#x, need %#x",
              __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }

    setBufferCapacity(calculateBufferCapacity(configurationInput.getBufferCapacity(),
                                              mFramesPerBurst, configurationInput.getSampleRate(),
                                              getSampleRate()));
    if (getBufferCapacity() < 0) {
        result = getBufferCapacity(); // negative error code
        setBufferCapacity(0);
        goto error;
    }

    {
        std::lock_guard<std::mutex> lock(audioDataQueueLock);
        // Create audio data shared memory buffer for client.
        mAudioDataQueue = std::make_shared<SharedRingBuffer>();
        result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
        if (result != AAUDIO_OK) {
            ALOGE("%s() could not allocate FIFO with %d frames",
                  __func__, getBufferCapacity());
            result = AAUDIO_ERROR_NO_MEMORY;
            goto error;
        }
    }

    result = endpoint->registerStream(keep);
    if (result != AAUDIO_OK) {
        goto error;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
    return AAUDIO_OK;

error:
    close();
    return result;
}

/**
 * Get an immutable description of the data queue created by this service.
 */
aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription_l(
        AudioEndpointParcelable* parcelable)
{
    std::lock_guard<std::mutex> lock(audioDataQueueLock);
    if (mAudioDataQueue == nullptr) {
        ALOGW("%s(): mAudioDataQueue null! - stream not open", __func__);
        return AAUDIO_ERROR_NULL;
    }
    // Gather information on the data queue.
    mAudioDataQueue->fillParcelable(parcelable,
                                    parcelable->mDownDataQueueParcelable);
    parcelable->mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
    return AAUDIO_OK;
}

void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
    mAtomicStreamTimestamp.write(timestamp);
}

// Get timestamp that was written by mixer or distributor.
aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition_l(int64_t *positionFrames,
                                                                    int64_t *timeNanos) {
    // TODO Get presentation timestamp from the HAL
    if (mAtomicStreamTimestamp.isValid()) {
        Timestamp timestamp = mAtomicStreamTimestamp.read();
        *positionFrames = timestamp.getPosition();
        *timeNanos = timestamp.getNanoseconds();
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
    }
}

// Get timestamp from lower level service.
aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp_l(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {

    int64_t position = 0;
    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        ALOGW("%s() has no endpoint", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    aaudio_result_t result = endpoint->getTimestamp(&position, timeNanos);
    if (result == AAUDIO_OK) {
        int64_t offset = mTimestampPositionOffset.load();
        // TODO, do not go below starting value
        position -= offset; // Offset from shared MMAP stream
        ALOGV("%s() %8lld = %8lld - %8lld",
              __func__, (long long) position, (long long) (position + offset), (long long) offset);
    }
    *positionFrames = position;
    return result;
}

void AAudioServiceStreamShared::writeDataIfRoom(int64_t mmapFramesRead,
                                                const void *buffer, int32_t numFrames) {
    int64_t clientFramesWritten = 0;

    // Lock the AudioFifo to protect against close.
    std::lock_guard <std::mutex> lock(audioDataQueueLock);

    if (mAudioDataQueue != nullptr) {
        std::shared_ptr<FifoBuffer> fifo = mAudioDataQueue->getFifoBuffer();
        // Determine offset between framePosition in client's stream
        // vs the underlying MMAP stream.
        clientFramesWritten = fifo->getWriteCounter();
        // There are two indices that refer to the same frame.
        int64_t positionOffset = mmapFramesRead - clientFramesWritten;
        setTimestampPositionOffset(positionOffset);
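        // e.g. if the MMAP stream position is 10000 frames while only 9000 frames have been
        // written to this client's FIFO, the offset is 1000; getHardwareTimestamp_l() subtracts
        // it so reported positions stay in the client's frame of reference.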

        // Is the buffer too full to write a burst?
        if (fifo->getEmptyFramesAvailable() < getFramesPerBurst()) {
            incrementXRunCount();
        } else {
            fifo->write(buffer, numFrames);
        }
        clientFramesWritten = fifo->getWriteCounter();
    }

    if (clientFramesWritten > 0) {
        // This timestamp represents the completion of data being written into the
        // client buffer. It is sent to the client and used in the timing model
        // to decide when data will be available to read.
        Timestamp timestamp(clientFramesWritten, AudioClock::getNanoseconds());
        markTransferTime(timestamp);
    }
}