1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "AudioStreamInternal"
18 //#define LOG_NDEBUG 0
19 #include <utils/Log.h>
20
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22
23 #include <stdint.h>
24
25 #include <binder/IServiceManager.h>
26
27 #include <aaudio/AAudio.h>
28 #include <cutils/properties.h>
29
30 #include <media/MediaMetricsItem.h>
31 #include <utils/Trace.h>
32
33 #include "AudioEndpointParcelable.h"
34 #include "binding/AAudioStreamRequest.h"
35 #include "binding/AAudioStreamConfiguration.h"
36 #include "binding/AAudioServiceMessage.h"
37 #include "core/AudioGlobal.h"
38 #include "core/AudioStreamBuilder.h"
39 #include "fifo/FifoBuffer.h"
40 #include "utility/AudioClock.h"
41 #include <media/AidlConversion.h>
42
43 #include "AudioStreamInternal.h"
44
// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
47 #undef LOG_TAG
48 // This file is used in both client and server processes.
49 // This is needed to make sense of the logs more easily.
50 #define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")
51
52 using android::Mutex;
53 using android::WrappingBuffer;
54 using android::content::AttributionSourceState;
55
56 using namespace aaudio;
57
58 #define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)
59
60 // Wait at least this many times longer than the operation should take.
61 #define MIN_TIMEOUT_OPERATIONS 4
62
63 #define LOG_TIMESTAMPS 0
64
// Construct a stream proxy that talks to the AAudio service.
// @param serviceInterface  interface used for all calls into the AAudio service
// @param inService         true when this object runs inside the service process;
//                          affects the LOG_TAG above and the mono->stereo retry in open()
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mInService(inService)
        , mServiceInterface(serviceInterface)
        , mAtomicInternalTimestamp()
        // Timing tuning knobs come from system properties in microseconds;
        // convert once to nanoseconds here.
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        {
        }
76
// Destructor only logs; resource teardown happens in release_l().
AudioStreamInternal::~AudioStreamInternal() {
    ALOGD("%s() %p called", __func__, this);
}
80
/**
 * Open the stream: send an open request to the AAudio service, then resolve
 * and configure the shared-memory endpoint described in the reply.
 *
 * Any failure after the service stream has been created jumps to the common
 * error path, which releases and closes the partially opened stream.
 *
 * @param builder parameters requested by the application
 * @return AAUDIO_OK or a negative AAUDIO_ERROR_* code
 */
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    int32_t framesPerBurst;
    int32_t framesPerHardwareBurst;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configurationOutput;

    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
        ALOGE("%s - already open! state = %d", __func__, getState());
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Copy requested parameters to the stream.
    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    const int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
    int32_t burstMicros = 0;

    const audio_format_t requestedFormat = getFormat();
    // We have to do volume scaling. So we prefer FLOAT format.
    if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }
    // Request FLOAT for the shared mixer or the device.
    request.getConfiguration().setFormat(AUDIO_FORMAT_PCM_FLOAT);

    // Identify the caller so the service can check permissions and attribute usage.
    // TODO b/182392769: use attribution source util
    AttributionSourceState attributionSource;
    attributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.packageName = builder.getOpPackageName();
    attributionSource.attributionTag = builder.getAttributionTag();
    attributionSource.token = sp<android::BBinder>::make();

    // Build the request to send to the server.
    request.setAttributionSource(attributionSource);
    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
    request.setInService(isInService());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setDirection(getDirection());
    request.getConfiguration().setSharingMode(getSharingMode());

    request.getConfiguration().setUsage(getUsage());
    request.getConfiguration().setContentType(getContentType());
    request.getConfiguration().setInputPreset(getInputPreset());
    request.getConfiguration().setPrivacySensitive(isPrivacySensitive());

    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mDeviceChannelCount = getSamplesPerFrame(); // Assume it will be the same. Update if not.

    mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
    if (mServiceStreamHandle < 0
            && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
            && getDirection() == AAUDIO_DIRECTION_OUTPUT
            && !isInService()) {
        // if that failed then try switching from mono to stereo if OUTPUT.
        // Only do this in the client. Otherwise we end up with a mono mixer in the service
        // that writes to a stereo MMAP stream.
        ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
              __func__, mServiceStreamHandle);
        request.getConfiguration().setSamplesPerFrame(2); // stereo
        mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
    }
    if (mServiceStreamHandle < 0) {
        // No service stream was created, so nothing to release here.
        return mServiceStreamHandle;
    }

    // This must match the key generated in oboeservice/AAudioServiceStreamBase.cpp
    // so the client can have permission to log.
    if (!mInService) {
        // No need to log if it is from service side.
        mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_STREAM)
                     + std::to_string(mServiceStreamHandle);
    }

    // Record the requested configuration for media metrics.
    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT,
                 android::toString(requestedFormat).c_str()).record();

    result = configurationOutput.validate();
    if (result != AAUDIO_OK) {
        goto error;
    }
    // Save results of the open.
    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
        setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
    }
    mDeviceChannelCount = configurationOutput.getSamplesPerFrame();

    setSampleRate(configurationOutput.getSampleRate());
    setDeviceId(configurationOutput.getDeviceId());
    setSessionId(configurationOutput.getSessionId());
    setSharingMode(configurationOutput.getSharingMode());

    setUsage(configurationOutput.getUsage());
    setContentType(configurationOutput.getContentType());
    setInputPreset(configurationOutput.getInputPreset());

    // Save device format so we can do format conversion and volume scaling together.
    setDeviceFormat(configurationOutput.getFormat());

    result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
    if (result != AAUDIO_OK) {
        goto error;
    }

    // Resolve parcelable into a descriptor.
    result = mEndPointParcelable.resolve(&mEndpointDescriptor);
    if (result != AAUDIO_OK) {
        goto error;
    }

    // Configure endpoint based on descriptor.
    mAudioEndpoint = std::make_unique<AudioEndpoint>();
    result = mAudioEndpoint->configure(&mEndpointDescriptor, getDirection());
    if (result != AAUDIO_OK) {
        goto error;
    }

    framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates. Doubling preserves alignment with the HW burst.
    framesPerBurst = framesPerHardwareBurst;
    do {
        if (burstMicros > 0) { // skip first loop
            framesPerBurst *= 2;
        }
        burstMicros = framesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
    } while (burstMicros < burstMinMicros);
    ALOGD("%s() original HW burst = %d, minMicros = %d => SW burst = %d\n",
          __func__, framesPerHardwareBurst, burstMinMicros, framesPerBurst);

    // Validate final burst size.
    if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
        ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }
    setFramesPerBurst(framesPerBurst); // only save good value

    mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
    if (mBufferCapacityInFrames < getFramesPerBurst()
            || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
        ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }

    // The clock model tracks the HW position, so it uses the HW burst size,
    // not the possibly scaled-up SW burst size.
    mClockModel.setSampleRate(getSampleRate());
    mClockModel.setFramesPerBurst(framesPerHardwareBurst);

    if (isDataCallbackSet()) {
        mCallbackFrames = builder.getFramesPerDataCallback();
        if (mCallbackFrames > getBufferCapacity() / 2) {
            ALOGW("%s - framesPerCallback too big = %d, capacity = %d",
                  __func__, mCallbackFrames, getBufferCapacity());
            result = AAUDIO_ERROR_OUT_OF_RANGE;
            goto error;

        } else if (mCallbackFrames < 0) {
            ALOGW("%s - framesPerCallback negative", __func__);
            result = AAUDIO_ERROR_OUT_OF_RANGE;
            goto error;

        }
        if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
            mCallbackFrames = getFramesPerBurst();
        }

        // Intermediate buffer handed to the app's data callback each cycle.
        const int32_t callbackBufferSize = mCallbackFrames * getBytesPerFrame();
        mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
    }

    // For debugging and analyzing the distribution of MMAP timestamps.
    // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
    // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
    // You can use this offset to reduce glitching.
    // You can also use this offset to force glitching. By iterating over multiple
    // values you can reveal the distribution of the hardware timing jitter.
    if (mAudioEndpoint->isFreeRunning()) { // MMAP?
        int32_t offsetMicros = (getDirection() == AAUDIO_DIRECTION_OUTPUT)
                ? AAudioProperty_getOutputMMapOffsetMicros()
                : AAudioProperty_getInputMMapOffsetMicros();
        // This log is used to debug some tricky glitch issues. Please leave.
        ALOGD_IF(offsetMicros, "%s() - %s mmap offset = %d micros",
                 __func__,
                 (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "output" : "input",
                 offsetMicros);
        mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
    }

    setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q

    setState(AAUDIO_STREAM_STATE_OPEN);

    return result;

error:
    // Release the service stream and local resources created above.
    safeReleaseClose();
    return result;
}
296
// Release the stream's service handle and local resources.
// This must be called under mStreamLock.
// Returns AAUDIO_OK on success, AAUDIO_ERROR_INVALID_HANDLE if already released,
// or the first error encountered during teardown.
aaudio_result_t AudioStreamInternal::release_l() {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD("%s(): mServiceStreamHandle = 0x%08X", __func__, mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_stream_state_t currentState = getState();
        // Don't release a stream while it is running. Stop it first.
        // If DISCONNECTED then we should still try to stop in case the
        // error callback is still running.
        if (isActive() || currentState == AAUDIO_STREAM_STATE_DISCONNECTED) {
            requestStop_l();
        }

        logReleaseBufferState();

        setState(AAUDIO_STREAM_STATE_CLOSING);
        // Invalidate the member handle BEFORE the binder call so concurrent
        // readers see the stream as already closed.
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;

        mServiceInterface.closeStream(serviceStreamHandle);
        mCallbackBuffer.reset();

        // Update local frame counters so we can query them after releasing the endpoint.
        getFramesRead();
        getFramesWritten();
        mAudioEndpoint.reset();
        result = mEndPointParcelable.close();
        aaudio_result_t result2 = AudioStream::release_l();
        // Report the first failure, preferring the endpoint close result.
        return (result != AAUDIO_OK) ? result : result2;
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}
330
aaudio_callback_thread_proc(void * context)331 static void *aaudio_callback_thread_proc(void *context)
332 {
333 AudioStreamInternal *stream = (AudioStreamInternal *)context;
334 //LOGD("oboe_callback_thread, stream = %p", stream);
335 if (stream != NULL) {
336 return stream->callbackLoop();
337 } else {
338 return NULL;
339 }
340 }
341
342 /*
343 * It normally takes about 20-30 msec to start a stream on the server.
344 * But the first time can take as much as 200-300 msec. The HW
345 * starts right away so by the time the client gets a chance to write into
346 * the buffer, it is already in a deep underflow state. That can cause the
347 * XRunCount to be non-zero, which could lead an app to tune its latency higher.
348 * To avoid this problem, we set a request for the processing code to start the
349 * client stream at the same position as the server stream.
350 * The processing code will then save the current offset
351 * between client and server and apply that to any position given to the app.
352 */
requestStart_l()353 aaudio_result_t AudioStreamInternal::requestStart_l()
354 {
355 int64_t startTime;
356 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
357 ALOGD("requestStart() mServiceStreamHandle invalid");
358 return AAUDIO_ERROR_INVALID_STATE;
359 }
360 if (isActive()) {
361 ALOGD("requestStart() already active");
362 return AAUDIO_ERROR_INVALID_STATE;
363 }
364
365 aaudio_stream_state_t originalState = getState();
366 if (originalState == AAUDIO_STREAM_STATE_DISCONNECTED) {
367 ALOGD("requestStart() but DISCONNECTED");
368 return AAUDIO_ERROR_DISCONNECTED;
369 }
370 setState(AAUDIO_STREAM_STATE_STARTING);
371
372 // Clear any stale timestamps from the previous run.
373 drainTimestampsFromService();
374
375 prepareBuffersForStart(); // tell subclasses to get ready
376
377 aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
378 if (result == AAUDIO_ERROR_INVALID_HANDLE) {
379 ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
380 // Stealing was added in R. Coerce result to improve backward compatibility.
381 result = AAUDIO_ERROR_DISCONNECTED;
382 setState(AAUDIO_STREAM_STATE_DISCONNECTED);
383 }
384
385 startTime = AudioClock::getNanoseconds();
386 mClockModel.start(startTime);
387 mNeedCatchUp.request(); // Ask data processing code to catch up when first timestamp received.
388
389 // Start data callback thread.
390 if (result == AAUDIO_OK && isDataCallbackSet()) {
391 // Launch the callback loop thread.
392 int64_t periodNanos = mCallbackFrames
393 * AAUDIO_NANOS_PER_SECOND
394 / getSampleRate();
395 mCallbackEnabled.store(true);
396 result = createThread_l(periodNanos, aaudio_callback_thread_proc, this);
397 }
398 if (result != AAUDIO_OK) {
399 setState(originalState);
400 }
401 return result;
402 }
403
calculateReasonableTimeout(int32_t framesPerOperation)404 int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
405
406 // Wait for at least a second or some number of callbacks to join the thread.
407 int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
408 * framesPerOperation
409 * AAUDIO_NANOS_PER_SECOND)
410 / getSampleRate();
411 if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
412 timeoutNanoseconds = MIN_TIMEOUT_NANOS;
413 }
414 return timeoutNanoseconds;
415 }
416
// Convenience overload: timeout sized for one burst.
int64_t AudioStreamInternal::calculateReasonableTimeout() {
    return calculateReasonableTimeout(getFramesPerBurst());
}
420
// Stop the data callback thread if it is running.
// This must be called under mStreamLock.
// NOTE: joinThread_l() may temporarily unlock mStreamLock, so callers must
// re-check stream state afterwards (see requestStop_l()).
aaudio_result_t AudioStreamInternal::stopCallback_l()
{
    if (isDataCallbackSet()
            && (isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
        // Signal the callback loop to exit, then wait for the thread.
        mCallbackEnabled.store(false);
        aaudio_result_t result = joinThread_l(NULL); // may temporarily unlock mStreamLock
        if (result == AAUDIO_ERROR_INVALID_HANDLE) {
            ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
            result = AAUDIO_OK;
        }
        return result;
    } else {
        ALOGD("%s() skipped, isDataCallbackSet() = %d, isActive() = %d, getState()  = %d", __func__,
              isDataCallbackSet(), isActive(), getState());
        return AAUDIO_OK;
    }
}
439
// Stop the stream. Must be called under mStreamLock.
// First stops the callback thread, then asks the service to stop the stream.
aaudio_result_t AudioStreamInternal::requestStop_l() {
    aaudio_result_t result = stopCallback_l();
    if (result != AAUDIO_OK) {
        ALOGW("%s() stop callback returned %d, returning early", __func__, result);
        return result;
    }
    // The stream may have been unlocked temporarily to let a callback finish
    // and the callback may have stopped the stream.
    // Check to make sure the stream still needs to be stopped.
    // See also AudioStream::safeStop_l().
    if (!(isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
        ALOGD("%s() returning early, not active or disconnected", __func__);
        return AAUDIO_OK;
    }

    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid = 0x%08X",
              __func__, mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Freeze the clock model and invalidate the cached timestamp while stopped.
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mAtomicInternalTimestamp.clear();

    result = mServiceInterface.stopStream(mServiceStreamHandle);
    if (result == AAUDIO_ERROR_INVALID_HANDLE) {
        ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
        result = AAUDIO_OK;
    }
    return result;
}
472
registerThread()473 aaudio_result_t AudioStreamInternal::registerThread() {
474 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
475 ALOGW("%s() mServiceStreamHandle invalid", __func__);
476 return AAUDIO_ERROR_INVALID_STATE;
477 }
478 return mServiceInterface.registerAudioThread(mServiceStreamHandle,
479 gettid(),
480 getPeriodNanoseconds());
481 }
482
unregisterThread()483 aaudio_result_t AudioStreamInternal::unregisterThread() {
484 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
485 ALOGW("%s() mServiceStreamHandle invalid", __func__);
486 return AAUDIO_ERROR_INVALID_STATE;
487 }
488 return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, gettid());
489 }
490
// Forward a startClient request to the service for this stream.
// @param client      identity of the client to start
// @param attr        audio attributes for the client, may be used by the service
// @param portHandle  out: port handle assigned by the service
// @return result from the service, or AAUDIO_ERROR_INVALID_STATE if not open
aaudio_result_t AudioStreamInternal::startClient(const android::AudioClient& client,
                                                 const audio_attributes_t *attr,
                                                 audio_port_handle_t *portHandle) {
    ALOGV("%s() called", __func__);
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    aaudio_result_t result =  mServiceInterface.startClient(mServiceStreamHandle,
                                                            client, attr, portHandle);
    ALOGV("%s(%d) returning %d", __func__, *portHandle, result);
    return result;
}
503
stopClient(audio_port_handle_t portHandle)504 aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t portHandle) {
505 ALOGV("%s(%d) called", __func__, portHandle);
506 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
507 return AAUDIO_ERROR_INVALID_STATE;
508 }
509 aaudio_result_t result = mServiceInterface.stopClient(mServiceStreamHandle, portHandle);
510 ALOGV("%s(%d) returning %d", __func__, portHandle, result);
511 return result;
512 }
513
getTimestamp(clockid_t clockId,int64_t * framePosition,int64_t * timeNanoseconds)514 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
515 int64_t *framePosition,
516 int64_t *timeNanoseconds) {
517 // Generated in server and passed to client. Return latest.
518 if (mAtomicInternalTimestamp.isValid()) {
519 Timestamp timestamp = mAtomicInternalTimestamp.read();
520 int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
521 if (position >= 0) {
522 *framePosition = position;
523 *timeNanoseconds = timestamp.getNanoseconds();
524 return AAUDIO_OK;
525 }
526 }
527 return AAUDIO_ERROR_INVALID_STATE;
528 }
529
updateStateMachine()530 aaudio_result_t AudioStreamInternal::updateStateMachine() {
531 if (isDataCallbackActive()) {
532 return AAUDIO_OK; // state is getting updated by the callback thread read/write call
533 }
534 return processCommands();
535 }
536
logTimestamp(AAudioServiceMessage & command)537 void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
538 static int64_t oldPosition = 0;
539 static int64_t oldTime = 0;
540 int64_t framePosition = command.timestamp.position;
541 int64_t nanoTime = command.timestamp.timestamp;
542 ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
543 (long long) framePosition,
544 (long long) nanoTime);
545 int64_t nanosDelta = nanoTime - oldTime;
546 if (nanosDelta > 0 && oldTime > 0) {
547 int64_t framesDelta = framePosition - oldPosition;
548 int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
549 ALOGD("logTimestamp: framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
550 (long long) framesDelta, (long long) nanosDelta, (long long) rate);
551 }
552 oldPosition = framePosition;
553 oldTime = nanoTime;
554 }
555
// Handle a service-generated timestamp message: feed it (plus the debug
// MMAP offset) into the clock model. Optionally logs when LOG_TIMESTAMPS is set.
aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
    logTimestamp(*message);
#endif
    processTimestamp(message->timestamp.position,
            message->timestamp.timestamp + mTimeOffsetNanos);
    return AAUDIO_OK;
}
564
onTimestampHardware(AAudioServiceMessage * message)565 aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
566 Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
567 mAtomicInternalTimestamp.write(timestamp);
568 return AAUDIO_OK;
569 }
570
// Handle a state/event message from the service.
// State-change events only take effect when the stream is in the matching
// transitional (…ING) state, so stale events are ignored.
// Returns AAUDIO_OK, or AAUDIO_ERROR_DISCONNECTED on a DISCONNECTED event.
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_STARTED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_PAUSED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                setState(AAUDIO_STREAM_STATE_PAUSED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_STOPPED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_FLUSHED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                setState(AAUDIO_STREAM_STATE_FLUSHED);
                onFlushFromServer();
            }
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            // Prevent hardware from looping on old data and making buzzing sounds.
            if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
                mAudioEndpoint->eraseDataMemory();
            }
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("%s - AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared", __func__);
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            // Apply the new stream volume pushed by the service.
            ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
            mStreamVolume = (float)message->event.dataDouble;
            doSetVolume();
            break;
        case AAUDIO_SERVICE_EVENT_XRUN:
            // Service reports the cumulative underrun/overrun count.
            mXRunCount = static_cast<int32_t>(message->event.dataLong);
            break;
        default:
            ALOGE("%s - Unrecognized event = %d", __func__, (int) message->event.event);
            break;
    }
    return result;
}
622
// Drain and discard queued timestamp messages from the service, typically
// before a (re)start so stale timestamps do not confuse the clock model.
// EVENT messages are still processed normally; unknown messages abort the
// drain with AAUDIO_ERROR_INTERNAL.
aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
            // ignore most messages
            case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
            case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
                break;

            case AAudioServiceMessage::code::EVENT:
                result = onEventFromServer(&message);
                break;

            default:
                ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
                result = AAUDIO_ERROR_INTERNAL;
                break;
        }
    }
    return result;
}
652
// Process all the commands coming from the server.
// Unlike drainTimestampsFromService(), timestamps are dispatched to the
// clock model / timestamp cache instead of being discarded.
// Loops until the queue is empty or a handler returns an error.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
            result = onTimestampService(&message);
            break;

        case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
            result = onTimestampHardware(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
            result = AAUDIO_ERROR_INTERNAL;
            break;
        }
    }
    return result;
}
686
687 // Read or write the data, block if needed and timeoutMillis > 0
processData(void * buffer,int32_t numFrames,int64_t timeoutNanoseconds)688 aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
689 int64_t timeoutNanoseconds)
690 {
691 const char * traceName = "aaProc";
692 const char * fifoName = "aaRdy";
693 ATRACE_BEGIN(traceName);
694 if (ATRACE_ENABLED()) {
695 int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
696 ATRACE_INT(fifoName, fullFrames);
697 }
698
699 aaudio_result_t result = AAUDIO_OK;
700 int32_t loopCount = 0;
701 uint8_t* audioData = (uint8_t*)buffer;
702 int64_t currentTimeNanos = AudioClock::getNanoseconds();
703 const int64_t entryTimeNanos = currentTimeNanos;
704 const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
705 int32_t framesLeft = numFrames;
706
707 // Loop until all the data has been processed or until a timeout occurs.
708 while (framesLeft > 0) {
709 // The call to processDataNow() will not block. It will just process as much as it can.
710 int64_t wakeTimeNanos = 0;
711 aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
712 currentTimeNanos, &wakeTimeNanos);
713 if (framesProcessed < 0) {
714 result = framesProcessed;
715 break;
716 }
717 framesLeft -= (int32_t) framesProcessed;
718 audioData += framesProcessed * getBytesPerFrame();
719
720 // Should we block?
721 if (timeoutNanoseconds == 0) {
722 break; // don't block
723 } else if (wakeTimeNanos != 0) {
724 if (!mAudioEndpoint->isFreeRunning()) {
725 // If there is software on the other end of the FIFO then it may get delayed.
726 // So wake up just a little after we expect it to be ready.
727 wakeTimeNanos += mWakeupDelayNanos;
728 }
729
730 currentTimeNanos = AudioClock::getNanoseconds();
731 int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
732 // Guarantee a minimum sleep time.
733 if (wakeTimeNanos < earliestWakeTime) {
734 wakeTimeNanos = earliestWakeTime;
735 }
736
737 if (wakeTimeNanos > deadlineNanos) {
738 // If we time out, just return the framesWritten so far.
739 // TODO remove after we fix the deadline bug
740 ALOGW("processData(): entered at %lld nanos, currently %lld",
741 (long long) entryTimeNanos, (long long) currentTimeNanos);
742 ALOGW("processData(): TIMEOUT after %lld nanos",
743 (long long) timeoutNanoseconds);
744 ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
745 (long long) wakeTimeNanos, (long long) deadlineNanos);
746 ALOGW("processData(): past deadline by %d micros",
747 (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
748 mClockModel.dump();
749 mAudioEndpoint->dump();
750 break;
751 }
752
753 if (ATRACE_ENABLED()) {
754 int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
755 ATRACE_INT(fifoName, fullFrames);
756 int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
757 ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
758 }
759
760 AudioClock::sleepUntilNanoTime(wakeTimeNanos);
761 currentTimeNanos = AudioClock::getNanoseconds();
762 }
763 }
764
765 if (ATRACE_ENABLED()) {
766 int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
767 ATRACE_INT(fifoName, fullFrames);
768 }
769
770 // return error or framesProcessed
771 (void) loopCount;
772 ATRACE_END();
773 return (result < 0) ? result : numFrames - framesLeft;
774 }
775
// Feed a service timestamp (frame position + time) into the clock model.
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}
779
// Set the effective buffer size (latency target) in frames.
// The request is clipped to [1 burst, capacity - 1 burst], rounded up to a
// whole number of bursts, and clipped against the endpoint's actual limit.
// Returns the adjusted size (as an aaudio_result_t, which is >= 0 on success).
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t adjustedFrames = requestedFrames;
    const int32_t maximumSize = getBufferCapacity() - getFramesPerBurst();
    // Minimum size should be a multiple number of bursts.
    const int32_t minimumSize = 1 * getFramesPerBurst();

    // Clip to minimum size so that rounding up will work better.
    adjustedFrames = std::max(minimumSize, adjustedFrames);

    // Prevent arithmetic overflow by clipping before we round.
    if (adjustedFrames >= maximumSize) {
        adjustedFrames = maximumSize;
    } else {
        // Round to the next highest burst size.
        int32_t numBursts = (adjustedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
        adjustedFrames = numBursts * getFramesPerBurst();
        // Clip just in case maximumSize is not a multiple of getFramesPerBurst().
        adjustedFrames = std::min(maximumSize, adjustedFrames);
    }

    if (mAudioEndpoint) {
        // Clip against the actual size from the endpoint.
        int32_t actualFrames = 0;
        // Set to maximum size so we can write extra data when ready in order to reduce glitches.
        // The amount we keep in the buffer is controlled by mBufferSizeInFrames.
        mAudioEndpoint->setBufferSizeInFrames(maximumSize, &actualFrames);
        // actualFrames should be <= actual maximum size of endpoint
        adjustedFrames = std::min(actualFrames, adjustedFrames);
    }

    // Only log to metrics when the effective size actually changes.
    if (adjustedFrames != mBufferSizeInFrames) {
        android::mediametrics::LogItem(mMetricsId)
                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, adjustedFrames)
                .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
                .record();
    }

    mBufferSizeInFrames = adjustedFrames;
    ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
    return (aaudio_result_t) adjustedFrames;
}
822
// Current effective buffer size (latency target) in frames.
int32_t AudioStreamInternal::getBufferSize() const {
    return mBufferSizeInFrames;
}
826
// Total capacity of the endpoint data queue in frames, as set during open().
int32_t AudioStreamInternal::getBufferCapacity() const {
    return mBufferCapacityInFrames;
}
830
isClockModelInControl() const831 bool AudioStreamInternal::isClockModelInControl() const {
832 return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
833 }
834