/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.audio;

import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.os.Build;
import android.os.Process;
import androidx.annotation.Nullable;
import java.nio.ByteBuffer;
import org.webrtc.CalledByNative;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStartErrorCode;
import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStateCallback;
import org.webrtc.audio.LowLatencyAudioBufferManager;

class WebRtcAudioTrack {
  private static final String TAG = "WebRtcAudioTrackExternal";

  // Default audio data format is PCM 16 bit per sample.
  // Guaranteed to be supported by all devices.
  private static final int BITS_PER_SAMPLE = 16;

  // Requested size of each played-out buffer provided to the client.
  private static final int CALLBACK_BUFFER_SIZE_MS = 10;

  // Average number of callbacks per second.
  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
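  // With the 10 ms callback size above this works out to 1000 / 10 = 100
  // callbacks per second.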

  // The AudioTrackThread is allowed to wait for a successful call to join(),
  // but the wait times out after this amount of time.
  private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;

  // By default, WebRTC creates audio tracks with a usage attribute
  // corresponding to voice communications, such as telephony or VoIP.
  private static final int DEFAULT_USAGE = AudioAttributes.USAGE_VOICE_COMMUNICATION;

  // Indicates the AudioTrack has started playing audio.
  private static final int AUDIO_TRACK_START = 0;

  // Indicates the AudioTrack has stopped playing audio.
  private static final int AUDIO_TRACK_STOP = 1;

  private long nativeAudioTrack;
  private final Context context;
  private final AudioManager audioManager;
  private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();

  private ByteBuffer byteBuffer;

  private @Nullable final AudioAttributes audioAttributes;
  private @Nullable AudioTrack audioTrack;
  private @Nullable AudioTrackThread audioThread;
  private final VolumeLogger volumeLogger;

  // Samples to be played are replaced by zeros if `speakerMute` is set to true.
  // Can be used to ensure that the speaker is fully muted.
  private volatile boolean speakerMute;
  private byte[] emptyBytes;
  private boolean useLowLatency;
  private int initialBufferSizeInFrames;

  private final @Nullable AudioTrackErrorCallback errorCallback;
  private final @Nullable AudioTrackStateCallback stateCallback;

  /**
   * Audio thread which keeps calling AudioTrack.write() to stream audio.
   * Data is periodically acquired from the native WebRTC layer using the
   * nativeGetPlayoutData callback function.
   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
   */
  private class AudioTrackThread extends Thread {
    private volatile boolean keepAlive = true;
    private LowLatencyAudioBufferManager bufferManager;

    public AudioTrackThread(String name) {
      super(name);
      bufferManager = new LowLatencyAudioBufferManager();
    }

    @Override
    public void run() {
      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
      Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
      assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);

      // Audio playout has started and the client is informed about it.
      doAudioTrackStateCallback(AUDIO_TRACK_START);

      // Fixed size in bytes of each 10ms block of audio data that we ask for
      // using callbacks to the native WebRTC client.
      final int sizeInBytes = byteBuffer.capacity();

      while (keepAlive) {
        // Get 10ms of PCM data from the native WebRTC client. Audio data is
        // written into the common ByteBuffer using the address that was
        // cached at construction.
        nativeGetPlayoutData(nativeAudioTrack, sizeInBytes);
        // Write data until all data has been written to the audio sink.
        // Upon return, the buffer position will have been advanced to reflect
        // the amount of data that was successfully written to the AudioTrack.
        assertTrue(sizeInBytes <= byteBuffer.remaining());
        if (speakerMute) {
          byteBuffer.clear();
          byteBuffer.put(emptyBytes);
          byteBuffer.position(0);
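          // The buffer now holds sizeInBytes zeros and its position has been
          // reset, so the blocking write below plays out silence.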
        }
        int bytesWritten = audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
        if (bytesWritten != sizeInBytes) {
          Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
          // If a write() returns a negative value, an error has occurred.
          // Stop playing and report an error in this case.
          if (bytesWritten < 0) {
            keepAlive = false;
            reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
          }
        }
        if (useLowLatency) {
          bufferManager.maybeAdjustBufferSize(audioTrack);
        }
        // The byte buffer must be rewound since byteBuffer.position() is
        // increased at each call to AudioTrack.write(). If we don't do this,
        // the next call to AudioTrack.write() will fail.
        byteBuffer.rewind();

        // TODO(henrika): it is possible to create a delay estimate here by
        // counting number of written frames and subtracting the result from
        // audioTrack.getPlaybackHeadPosition().
      }
    }

    // Stops the inner thread loop which results in calling AudioTrack.stop().
    // Does not block the calling thread.
    public void stopThread() {
      Logging.d(TAG, "stopThread");
      keepAlive = false;
    }
  }

  @CalledByNative
  WebRtcAudioTrack(Context context, AudioManager audioManager) {
    this(context, audioManager, null /* audioAttributes */, null /* errorCallback */,
        null /* stateCallback */, false /* useLowLatency */, true /* enableVolumeLogger */);
  }

  WebRtcAudioTrack(Context context, AudioManager audioManager,
      @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback,
      @Nullable AudioTrackStateCallback stateCallback, boolean useLowLatency,
      boolean enableVolumeLogger) {
    threadChecker.detachThread();
    this.context = context;
    this.audioManager = audioManager;
    this.audioAttributes = audioAttributes;
    this.errorCallback = errorCallback;
    this.stateCallback = stateCallback;
    this.volumeLogger = enableVolumeLogger ? new VolumeLogger(audioManager) : null;
    this.useLowLatency = useLowLatency;
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  }

  @CalledByNative
  public void setNativeAudioTrack(long nativeAudioTrack) {
    this.nativeAudioTrack = nativeAudioTrack;
  }

  @CalledByNative
  private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG,
        "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels
            + ", bufferSizeFactor=" + bufferSizeFactor + ")");
    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
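    // Illustrative example: at 48000 Hz mono, bytesPerFrame is
    // 1 * (16 / 8) = 2, so the buffer holds 2 * (48000 / 100) = 960 bytes,
    // i.e. exactly 10 ms of PCM audio.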
    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
    emptyBytes = new byte[byteBuffer.capacity()];
    // Rather than passing the ByteBuffer with every callback (requiring
    // the potentially expensive GetDirectBufferAddress) we simply have the
    // native class cache the address to the memory once.
    nativeCacheDirectBufferAddress(nativeAudioTrack, byteBuffer);

    // Get the minimum buffer size required for the successful creation of an
    // AudioTrack object in MODE_STREAM mode.
    // Note that this size doesn't guarantee a smooth playback under load.
    final int channelConfig = channelCountToConfiguration(channels);
    final int minBufferSizeInBytes = (int) (AudioTrack.getMinBufferSize(sampleRate, channelConfig,
                                                AudioFormat.ENCODING_PCM_16BIT)
        * bufferSizeFactor);
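    // Illustrative values: if getMinBufferSize() reports 3840 bytes and
    // bufferSizeFactor is 2.0, minBufferSizeInBytes becomes 7680 bytes.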
    Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
    // For the streaming mode, data must be written to the audio sink in
    // chunks of size (given by byteBuffer.capacity()) less than or equal
    // to the total buffer size `minBufferSizeInBytes`. But, we have seen
    // reports of "getMinBufferSize(): error querying hardware". Hence, it
    // can happen that `minBufferSizeInBytes` contains an invalid value.
    if (minBufferSizeInBytes < byteBuffer.capacity()) {
      reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
      return -1;
    }

    // Don't use low-latency mode when a bufferSizeFactor > 1 is used. When bufferSizeFactor > 1
    // we want to use a larger buffer to prevent underruns. However, low-latency mode would
    // decrease the buffer size, which makes the bufferSizeFactor have no effect.
    if (bufferSizeFactor > 1.0) {
      useLowLatency = false;
    }

    // Ensure that the previous audio session was stopped correctly before
    // trying to create a new AudioTrack.
    if (audioTrack != null) {
      reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
      return -1;
    }
    try {
      // Create an AudioTrack object and initialize its associated audio buffer.
      // The size of this buffer determines how long an AudioTrack can play
      // before running out of data.
      if (useLowLatency && Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        // On API level 26 or higher, we can use a low latency mode.
        audioTrack = createAudioTrackOnOreoOrHigher(
            sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
      } else {
        // As we are on API level 21 or higher, it is possible to use a special AudioTrack
        // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
        // supersede the notion of stream types for defining the behavior of audio playback,
        // and to allow certain platforms or routing policies to use this information for more
        // refined volume or routing decisions.
        audioTrack = createAudioTrackBeforeOreo(
            sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
      }
    } catch (IllegalArgumentException e) {
      reportWebRtcAudioTrackInitError(e.getMessage());
      releaseAudioResources();
      return -1;
    }

    // It can happen that an AudioTrack is created but it was not successfully
    // initialized upon creation. Seems to be the case e.g. when the maximum
    // number of globally available audio tracks is exceeded.
    if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
      reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
      releaseAudioResources();
      return -1;
    }
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      initialBufferSizeInFrames = audioTrack.getBufferSizeInFrames();
    } else {
      initialBufferSizeInFrames = -1;
    }
    logMainParameters();
    logMainParametersExtended();
    return minBufferSizeInBytes;
  }

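  // Expected call sequence from the native audio device module (a sketch of
  // typical usage, not a normative contract): setNativeAudioTrack() ->
  // initPlayout() -> startPlayout() -> ... -> stopPlayout().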
  @CalledByNative
  private boolean startPlayout() {
    threadChecker.checkIsOnValidThread();
    if (volumeLogger != null) {
      volumeLogger.start();
    }
    Logging.d(TAG, "startPlayout");
    assertTrue(audioTrack != null);
    assertTrue(audioThread == null);

    // Starts playing an audio track.
    try {
      audioTrack.play();
    } catch (IllegalStateException e) {
      reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
          "AudioTrack.play failed: " + e.getMessage());
      releaseAudioResources();
      return false;
    }
    if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
      reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
          "AudioTrack.play failed - incorrect state: " + audioTrack.getPlayState());
      releaseAudioResources();
      return false;
    }

    // Create and start a new high-priority thread which calls AudioTrack.write()
    // and where we also call the native nativeGetPlayoutData() callback to
    // request decoded audio from WebRTC.
    audioThread = new AudioTrackThread("AudioTrackJavaThread");
    audioThread.start();
    return true;
  }

  @CalledByNative
  private boolean stopPlayout() {
    threadChecker.checkIsOnValidThread();
    if (volumeLogger != null) {
      volumeLogger.stop();
    }
    Logging.d(TAG, "stopPlayout");
    assertTrue(audioThread != null);
    logUnderrunCount();
    audioThread.stopThread();

    Logging.d(TAG, "Stopping the AudioTrackThread...");
    audioThread.interrupt();
    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioTrackThread timed out.");
      WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    }
    Logging.d(TAG, "AudioTrackThread has now been stopped.");
    audioThread = null;
    if (audioTrack != null) {
      Logging.d(TAG, "Calling AudioTrack.stop...");
      try {
        audioTrack.stop();
        Logging.d(TAG, "AudioTrack.stop is done.");
        doAudioTrackStateCallback(AUDIO_TRACK_STOP);
      } catch (IllegalStateException e) {
        Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
      }
    }
    releaseAudioResources();
    return true;
  }

  // Get max possible volume index for a phone call audio stream.
  @CalledByNative
  private int getStreamMaxVolume() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "getStreamMaxVolume");
    return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
  }

  // Set current volume level for a phone call audio stream.
  @CalledByNative
  private boolean setStreamVolume(int volume) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "setStreamVolume(" + volume + ")");
    if (audioManager.isVolumeFixed()) {
      Logging.e(TAG, "The device implements a fixed volume policy.");
      return false;
    }
    audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
    return true;
  }

  /** Get current volume level for a phone call audio stream. */
  @CalledByNative
  private int getStreamVolume() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "getStreamVolume");
    return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
  }

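  // Returns the number of underruns reported by the platform, -1 if the
  // AudioTrack has not been created, or -2 on API levels below N (24) where
  // AudioTrack.getUnderrunCount() is not available.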
  @CalledByNative
  private int GetPlayoutUnderrunCount() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
      if (audioTrack != null) {
        return audioTrack.getUnderrunCount();
      } else {
        return -1;
      }
    } else {
      return -2;
    }
  }

  private void logMainParameters() {
    Logging.d(TAG,
        "AudioTrack: "
            + "session ID: " + audioTrack.getAudioSessionId() + ", "
            + "channels: " + audioTrack.getChannelCount() + ", "
            + "sample rate: " + audioTrack.getSampleRate()
            + ", "
            // Gain (>=1.0) expressed as linear multiplier on sample values.
            + "max gain: " + AudioTrack.getMaxVolume());
  }

  private static void logNativeOutputSampleRate(int requestedSampleRateInHz) {
    final int nativeOutputSampleRate =
        AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
    Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
    if (requestedSampleRateInHz != nativeOutputSampleRate) {
      Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
    }
  }

  private static AudioAttributes getAudioAttributes(@Nullable AudioAttributes overrideAttributes) {
    AudioAttributes.Builder attributesBuilder =
        new AudioAttributes.Builder()
            .setUsage(DEFAULT_USAGE)
            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH);

    if (overrideAttributes != null) {
      if (overrideAttributes.getUsage() != AudioAttributes.USAGE_UNKNOWN) {
        attributesBuilder.setUsage(overrideAttributes.getUsage());
      }
      if (overrideAttributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) {
        attributesBuilder.setContentType(overrideAttributes.getContentType());
      }

      attributesBuilder.setFlags(overrideAttributes.getFlags());

      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
        attributesBuilder = applyAttributesOnQOrHigher(attributesBuilder, overrideAttributes);
      }
    }
    return attributesBuilder.build();
  }
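
  // Caller-side sketch (hypothetical usage, assuming the standard
  // JavaAudioDeviceModule builder API): an application can supply override
  // attributes via
  //   JavaAudioDeviceModule.builder(context)
  //       .setAudioAttributes(new AudioAttributes.Builder()
  //           .setUsage(AudioAttributes.USAGE_MEDIA)
  //           .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
  //           .build())
  //       .createAudioDeviceModule();
  // which getAudioAttributes() then merges with the defaults above.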

  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
  // It allows certain platforms or routing policies to use this information for more
  // refined volume or routing decisions.
  private static AudioTrack createAudioTrackBeforeOreo(int sampleRateInHz, int channelConfig,
      int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
    Logging.d(TAG, "createAudioTrackBeforeOreo");
    logNativeOutputSampleRate(sampleRateInHz);

    // Create an audio track where the audio usage is for VoIP and the content type is speech.
    return new AudioTrack(getAudioAttributes(overrideAttributes),
        new AudioFormat.Builder()
            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
            .setSampleRate(sampleRateInHz)
            .setChannelMask(channelConfig)
            .build(),
        bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
  }

  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
  // Use the low-latency mode to improve audio latency. Note that the low-latency mode may
  // prevent effects (such as AEC) from working. Assuming AEC is working, the delay changes
  // that happen in low-latency mode during the call will cause the AEC to perform worse.
  // The behavior of the low-latency mode may be device dependent, use at your own risk.
  @TargetApi(Build.VERSION_CODES.O)
  private static AudioTrack createAudioTrackOnOreoOrHigher(int sampleRateInHz, int channelConfig,
      int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
    Logging.d(TAG, "createAudioTrackOnOreoOrHigher");
    logNativeOutputSampleRate(sampleRateInHz);

    // Create an audio track where the audio usage is for VoIP and the content type is speech.
    return new AudioTrack.Builder()
        .setAudioAttributes(getAudioAttributes(overrideAttributes))
        .setAudioFormat(new AudioFormat.Builder()
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .setSampleRate(sampleRateInHz)
                            .setChannelMask(channelConfig)
                            .build())
        .setBufferSizeInBytes(bufferSizeInBytes)
        .setPerformanceMode(AudioTrack.PERFORMANCE_MODE_LOW_LATENCY)
        .setTransferMode(AudioTrack.MODE_STREAM)
        .setSessionId(AudioManager.AUDIO_SESSION_ID_GENERATE)
        .build();
  }

  @TargetApi(Build.VERSION_CODES.Q)
  private static AudioAttributes.Builder applyAttributesOnQOrHigher(
      AudioAttributes.Builder builder, AudioAttributes overrideAttributes) {
    return builder.setAllowedCapturePolicy(overrideAttributes.getAllowedCapturePolicy());
  }

  private void logBufferSizeInFrames() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      Logging.d(TAG,
          "AudioTrack: "
              // The effective size of the AudioTrack buffer that the app writes to.
              + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
    }
  }

  @CalledByNative
  private int getBufferSizeInFrames() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      return audioTrack.getBufferSizeInFrames();
    }
    return -1;
  }

  @CalledByNative
  private int getInitialBufferSizeInFrames() {
    return initialBufferSizeInFrames;
  }

  private void logBufferCapacityInFrames() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
      Logging.d(TAG,
          "AudioTrack: "
              // Maximum size of the AudioTrack buffer in frames.
              + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
    }
  }

  private void logMainParametersExtended() {
    logBufferSizeInFrames();
    logBufferCapacityInFrames();
  }

  // Prints the number of underrun occurrences in the application-level write
  // buffer since the AudioTrack was created. An underrun occurs if the app does
  // not write audio data quickly enough, causing the buffer to underflow and a
  // potential audio glitch.
  // TODO(henrika): keep track of this value in the field and possibly add new
  // UMA stat if needed.
  private void logUnderrunCount() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
      Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
    }
  }

  // Helper method which throws an AssertionError when an assertion has failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
      throw new AssertionError("Expected condition to be true");
    }
  }

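  // Maps a channel count to an AudioFormat channel mask. Note that any count
  // other than 1 is treated as stereo; true multi-channel output is not
  // supported by this layer.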
  private int channelCountToConfiguration(int channels) {
    return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
  }

  private static native void nativeCacheDirectBufferAddress(
      long nativeAudioTrackJni, ByteBuffer byteBuffer);
  private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes);

  // Sets all samples to be played out to zero if `mute` is true, i.e.,
  // ensures that the speaker is muted.
  public void setSpeakerMute(boolean mute) {
    Logging.w(TAG, "setSpeakerMute(" + mute + ")");
    speakerMute = mute;
  }

  // Releases the native AudioTrack resources.
  private void releaseAudioResources() {
    Logging.d(TAG, "releaseAudioResources");
    if (audioTrack != null) {
      audioTrack.release();
      audioTrack = null;
    }
  }

  private void reportWebRtcAudioTrackInitError(String errorMessage) {
    Logging.e(TAG, "Init playout error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackInitError(errorMessage);
    }
  }

  private void reportWebRtcAudioTrackStartError(
      AudioTrackStartErrorCode errorCode, String errorMessage) {
    Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
    }
  }

  private void reportWebRtcAudioTrackError(String errorMessage) {
    Logging.e(TAG, "Run-time playback error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioTrackError(errorMessage);
    }
  }

  private void doAudioTrackStateCallback(int audioState) {
    Logging.d(TAG, "doAudioTrackStateCallback: " + audioState);
    if (stateCallback != null) {
      if (audioState == WebRtcAudioTrack.AUDIO_TRACK_START) {
        stateCallback.onWebRtcAudioTrackStart();
      } else if (audioState == WebRtcAudioTrack.AUDIO_TRACK_STOP) {
        stateCallback.onWebRtcAudioTrackStop();
      } else {
        Logging.e(TAG, "Invalid audio state");
      }
    }
  }
}