• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import java.lang.annotation.Retention;
20 import java.lang.annotation.RetentionPolicy;
21 import java.lang.ref.WeakReference;
22 import java.lang.Math;
23 import java.nio.ByteBuffer;
24 import java.nio.ByteOrder;
25 import java.nio.NioUtils;
26 import java.util.Collection;
27 
28 import android.annotation.IntDef;
29 import android.annotation.NonNull;
30 import android.annotation.Nullable;
31 import android.app.ActivityThread;
32 import android.content.Context;
33 import android.os.Handler;
34 import android.os.IBinder;
35 import android.os.Looper;
36 import android.os.Message;
37 import android.os.Process;
38 import android.os.RemoteException;
39 import android.os.ServiceManager;
40 import android.util.ArrayMap;
41 import android.util.Log;
42 
43 import com.android.internal.annotations.GuardedBy;
44 
45 /**
46  * The AudioTrack class manages and plays a single audio resource for Java applications.
47  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
48  * achieved by "pushing" the data to the AudioTrack object using one of the
49  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
50  *  and {@link #write(float[], int, int, int)} methods.
51  *
52  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
53  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
54  * one of the {@code write()} methods. These are blocking and return when the data has been
55  * transferred from the Java layer to the native layer and queued for playback. The streaming
56  * mode is most useful when playing blocks of audio data that for instance are:
57  *
58  * <ul>
59  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
60  *   <li>too big to fit in memory because of the characteristics of the audio data
61  *         (high sampling rate, bits per sample ...)</li>
62  *   <li>received or generated while previously queued audio is playing.</li>
63  * </ul>
64  *
65  * The static mode should be chosen when dealing with short sounds that fit in memory and
66  * that need to be played with the smallest latency possible. The static mode will
67  * therefore be preferred for UI and game sounds that are played often, and with the
68  * smallest overhead possible.
69  *
70  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
71  * The size of this buffer, specified during the construction, determines how long an AudioTrack
72  * can play before running out of data.<br>
73  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
74  * be played from it.<br>
75  * For the streaming mode, data will be written to the audio sink in chunks of
76  * sizes less than or equal to the total buffer size.
77  *
78  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
79  */
80 public class AudioTrack extends PlayerBase
81                         implements AudioRouting
82                                  , VolumeAutomation
83 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count, queried from the native layer.
     * @hide public for MediaCodec only, do not un-hide or change to a numeric literal
     */
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";
190 
191 
    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
281 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
390 
391 
392     //--------------------------------------------------------------------------
393     // Constructor, Finalize
394     //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, letting the system generate a
        // new audio session ID for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }
441 
442     /**
443      * Class constructor with audio session. Use this constructor when the AudioTrack must be
444      * attached to a particular audio session. The primary use of the audio session ID is to
445      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
446      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
447      * and media players in the same session and not to the output mix.
448      * When an AudioTrack is created without specifying a session, it will create its own session
449      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
450      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
451      * session
452      * with all other media players or audio tracks in the same session, otherwise a new session
453      * will be created for this track if none is supplied.
454      * @param streamType the type of the audio stream. See
455      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
456      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
457      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
458      * @param sampleRateInHz the initial source sample rate expressed in Hz.
459      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
460      *   which is usually the sample rate of the sink.
461      * @param channelConfig describes the configuration of the audio channels.
462      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
463      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
464      * @param audioFormat the format in which the audio data is represented.
465      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
466      *   {@link AudioFormat#ENCODING_PCM_8BIT},
467      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
468      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
469      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
470      *   <p> If the track's creation mode is {@link #MODE_STATIC},
471      *   this is the maximum length sample, or audio clip, that can be played by this instance.
472      *   <p> If the track's creation mode is {@link #MODE_STREAM},
473      *   this should be the desired buffer size
474      *   for the <code>AudioTrack</code> to satisfy the application's
475      *   latency requirements.
476      *   If <code>bufferSizeInBytes</code> is less than the
477      *   minimum buffer size for the output sink, it is increased to the minimum
478      *   buffer size.
479      *   The method {@link #getBufferSizeInFrames()} returns the
480      *   actual size in frames of the buffer created, which
481      *   determines the minimum frequency to write
482      *   to the streaming <code>AudioTrack</code> to avoid underrun.
483      *   You can write data into this buffer in smaller chunks than this size.
484      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
485      *   for an AudioTrack instance in streaming mode.
486      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
487      * @param sessionId Id of audio session the AudioTrack must be attached to
488      * @throws java.lang.IllegalArgumentException
489      * @deprecated use {@link Builder} or
490      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
491      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
492      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)493     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
494             int bufferSizeInBytes, int mode, int sessionId)
495     throws IllegalArgumentException {
496         // mState already == STATE_UNINITIALIZED
497         this((new AudioAttributes.Builder())
498                     .setLegacyStreamType(streamType)
499                     .build(),
500                 (new AudioFormat.Builder())
501                     .setChannelMask(channelConfig)
502                     .setEncoding(audioFormat)
503                     .setSampleRate(sampleRateInHz)
504                     .build(),
505                 bufferSizeInBytes,
506                 mode, sessionId);
507         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
508     }
509 
    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            // Set FLAG_DEEP_BUFFER and clear FLAG_LOW_LATENCY in one replaceFlags() call,
            // since the two flags are mutually exclusive paths to the output sink.
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation;
        // fall back to the main looper when constructed from a looper-less thread
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is passed down as 0 (native convention for
        // "let the sink choose"); the resolved rate is read back after native_setup.
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the properties that were explicitly set on the AudioFormat.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates the parameters and stores them into the corresponding m* fields
        // (mSampleRate, mChannelMask, mChannelIndexMask, mAudioFormat, mDataLoadMode).
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // single-element arrays act as in/out parameters for native_setup: the native
        // layer may update the sample rate and session ID, read back below on success
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // round the AV sync header size up to a whole number of frames so the first
            // audio sample after the header remains frame-aligned
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        if (mDataLoadMode == MODE_STATIC) {
            // a static track is not fully initialized until write() supplies its clip data
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer();
    }
631 
632     /**
633      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
634      * the AudioTrackRoutingProxy subclass.
635      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
636      * (associated with an OpenSL ES player).
637      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
638      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
639      * it means that the OpenSL player interface hasn't been realized, so there is no native
640      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
641      * OpenSLES interface is realized.
642      */
AudioTrack(long nativeTrackInJavaObj)643     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
644         super(new AudioAttributes.Builder().build(),
645                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
646         // "final"s
647         mNativeTrackInJavaObj = 0;
648         mJniData = 0;
649 
650         // remember which looper is associated with the AudioTrack instantiation
651         Looper looper;
652         if ((looper = Looper.myLooper()) == null) {
653             looper = Looper.getMainLooper();
654         }
655         mInitializationLooper = looper;
656 
657         // other initialization...
658         if (nativeTrackInJavaObj != 0) {
659             baseRegisterPlayer();
660             deferred_connect(nativeTrackInJavaObj);
661         } else {
662             mState = STATE_UNINITIALIZED;
663         }
664     }
665 
666     /**
667      * @hide
668      */
deferred_connect(long nativeTrackInJavaObj)669     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
670         if (mState != STATE_INITIALIZED) {
671             // Note that for this native_setup, we are providing an already created/initialized
672             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
673             int[] session = { 0 };
674             int[] rates = { 0 };
675             int initResult = native_setup(new WeakReference<AudioTrack>(this),
676                     null /*mAttributes - NA*/,
677                     rates /*sampleRate - NA*/,
678                     0 /*mChannelMask - NA*/,
679                     0 /*mChannelIndexMask - NA*/,
680                     0 /*mAudioFormat - NA*/,
681                     0 /*mNativeBufferSizeInBytes - NA*/,
682                     0 /*mDataLoadMode - NA*/,
683                     session,
684                     nativeTrackInJavaObj);
685             if (initResult != SUCCESS) {
686                 loge("Error code "+initResult+" when initializing AudioTrack.");
687                 return; // with mState == STATE_UNINITIALIZED
688             }
689 
690             mSessionId = session[0];
691 
692             mState = STATE_INITIALIZED;
693         }
694     }
695 
696     /**
697      * Builder class for {@link AudioTrack} objects.
698      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
699      * attributes and audio format parameters, you indicate which of those vary from the default
700      * behavior on the device.
701      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
702      * parameters, to be used by a new <code>AudioTrack</code> instance:
703      *
704      * <pre class="prettyprint">
705      * AudioTrack player = new AudioTrack.Builder()
706      *         .setAudioAttributes(new AudioAttributes.Builder()
707      *                  .setUsage(AudioAttributes.USAGE_ALARM)
708      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
709      *                  .build())
710      *         .setAudioFormat(new AudioFormat.Builder()
711      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
712      *                 .setSampleRate(44100)
713      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
714      *                 .build())
715      *         .setBufferSizeInBytes(minBuffSize)
716      *         .build();
717      * </pre>
718      * <p>
719      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
720      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
721      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
722      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
723      * {@link AudioFormat#ENCODING_PCM_16BIT}.
724      * The sample rate will depend on the device actually selected for playback and can be queried
725      * with {@link #getSampleRate()} method.
726      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
727      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
728      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
729      * <code>MODE_STREAM</code> will be used.
730      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
731      * be generated.
732      */
733     public static class Builder {
734         private AudioAttributes mAttributes;
735         private AudioFormat mFormat;
736         private int mBufferSizeInBytes;
737         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
738         private int mMode = MODE_STREAM;
739         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
740 
741         /**
742          * Constructs a new Builder with the default values as described above.
743          */
Builder()744         public Builder() {
745         }
746 
747         /**
748          * Sets the {@link AudioAttributes}.
749          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
750          *     data to be played.
751          * @return the same Builder instance.
752          * @throws IllegalArgumentException
753          */
setAudioAttributes(@onNull AudioAttributes attributes)754         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
755                 throws IllegalArgumentException {
756             if (attributes == null) {
757                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
758             }
759             // keep reference, we only copy the data when building
760             mAttributes = attributes;
761             return this;
762         }
763 
764         /**
765          * Sets the format of the audio data to be played by the {@link AudioTrack}.
766          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
767          * as encoding, channel mask and sample rate.
768          * @param format a non-null {@link AudioFormat} instance.
769          * @return the same Builder instance.
770          * @throws IllegalArgumentException
771          */
setAudioFormat(@onNull AudioFormat format)772         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
773                 throws IllegalArgumentException {
774             if (format == null) {
775                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
776             }
777             // keep reference, we only copy the data when building
778             mFormat = format;
779             return this;
780         }
781 
782         /**
783          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
784          * If using the {@link AudioTrack} in streaming mode
785          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
786          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
787          * the estimated minimum buffer size for the creation of an AudioTrack instance
788          * in streaming mode.
789          * <br>If using the <code>AudioTrack</code> in static mode (see
790          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
791          * played by this instance.
792          * @param bufferSizeInBytes
793          * @return the same Builder instance.
794          * @throws IllegalArgumentException
795          */
setBufferSizeInBytes(int bufferSizeInBytes)796         public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
797                 throws IllegalArgumentException {
798             if (bufferSizeInBytes <= 0) {
799                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
800             }
801             mBufferSizeInBytes = bufferSizeInBytes;
802             return this;
803         }
804 
805         /**
806          * Sets the mode under which buffers of audio data are transferred from the
807          * {@link AudioTrack} to the framework.
808          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
809          * @return the same Builder instance.
810          * @throws IllegalArgumentException
811          */
setTransferMode(@ransferMode int mode)812         public @NonNull Builder setTransferMode(@TransferMode int mode)
813                 throws IllegalArgumentException {
814             switch(mode) {
815                 case MODE_STREAM:
816                 case MODE_STATIC:
817                     mMode = mode;
818                     break;
819                 default:
820                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
821             }
822             return this;
823         }
824 
825         /**
826          * Sets the session ID the {@link AudioTrack} will be attached to.
827          * @param sessionId a strictly positive ID number retrieved from another
828          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
829          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
830          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
831          * @return the same Builder instance.
832          * @throws IllegalArgumentException
833          */
setSessionId(int sessionId)834         public @NonNull Builder setSessionId(int sessionId)
835                 throws IllegalArgumentException {
836             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
837                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
838             }
839             mSessionId = sessionId;
840             return this;
841         }
842 
843         /**
844          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
845          * may not be supported by the particular device, and the framework is free
846          * to ignore such request if it is incompatible with other requests or hardware.
847          *
848          * @param performanceMode one of
849          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
850          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
851          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
852          * @return the same Builder instance.
853          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
854          */
setPerformanceMode(@erformanceMode int performanceMode)855         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
856             switch (performanceMode) {
857                 case PERFORMANCE_MODE_NONE:
858                 case PERFORMANCE_MODE_LOW_LATENCY:
859                 case PERFORMANCE_MODE_POWER_SAVING:
860                     mPerformanceMode = performanceMode;
861                     break;
862                 default:
863                     throw new IllegalArgumentException(
864                             "Invalid performance mode " + performanceMode);
865             }
866             return this;
867         }
868 
869         /**
870          * Builds an {@link AudioTrack} instance initialized with all the parameters set
871          * on this <code>Builder</code>.
872          * @return a new successfully initialized {@link AudioTrack} instance.
873          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
874          *     were incompatible, or if they are not supported by the device,
875          *     or if the device was not available.
876          */
build()877         public @NonNull AudioTrack build() throws UnsupportedOperationException {
878             if (mAttributes == null) {
879                 mAttributes = new AudioAttributes.Builder()
880                         .setUsage(AudioAttributes.USAGE_MEDIA)
881                         .build();
882             }
883             switch (mPerformanceMode) {
884             case PERFORMANCE_MODE_LOW_LATENCY:
885                 mAttributes = new AudioAttributes.Builder(mAttributes)
886                     .replaceFlags((mAttributes.getAllFlags()
887                             | AudioAttributes.FLAG_LOW_LATENCY)
888                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
889                     .build();
890                 break;
891             case PERFORMANCE_MODE_NONE:
892                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
893                     break; // do not enable deep buffer mode.
894                 }
895                 // permitted to fall through to enable deep buffer
896             case PERFORMANCE_MODE_POWER_SAVING:
897                 mAttributes = new AudioAttributes.Builder(mAttributes)
898                 .replaceFlags((mAttributes.getAllFlags()
899                         | AudioAttributes.FLAG_DEEP_BUFFER)
900                         & ~AudioAttributes.FLAG_LOW_LATENCY)
901                 .build();
902                 break;
903             }
904 
905             if (mFormat == null) {
906                 mFormat = new AudioFormat.Builder()
907                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
908                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
909                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
910                         .build();
911             }
912             try {
913                 // If the buffer size is not specified in streaming mode,
914                 // use a single frame for the buffer size and let the
915                 // native code figure out the minimum buffer size.
916                 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
917                     mBufferSizeInBytes = mFormat.getChannelCount()
918                             * mFormat.getBytesPerSample(mFormat.getEncoding());
919                 }
920                 final AudioTrack track = new AudioTrack(
921                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
922                 if (track.getState() == STATE_UNINITIALIZED) {
923                     // release is not necessary
924                     throw new UnsupportedOperationException("Cannot create AudioTrack");
925                 }
926                 return track;
927             } catch (IllegalArgumentException e) {
928                 throw new UnsupportedOperationException(e.getMessage());
929             }
930         }
931     }
932 
    // Mask of all the positional output channels this class supports.
    // Note: the allowed combinations are further restricted by the matching
    // left/right pairing rule and CHANNEL_COUNT_MAX
    // (enforced in isMultichannelConfigSupported()).
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
945 
946     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
947     // power saving to be automatically enabled for an AudioTrack. Returns false if
948     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)949     private static boolean shouldEnablePowerSaving(
950             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
951             int bufferSizeInBytes, int mode) {
952         // If no attributes, OK
953         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
954         if (attributes != null &&
955                 (attributes.getAllFlags() != 0  // cannot have any special flags
956                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
957                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
958                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
959                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
960             return false;
961         }
962 
963         // Format must be fully specified and be linear pcm
964         if (format == null
965                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
966                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
967                 || !AudioFormat.isValidEncoding(format.getEncoding())
968                 || format.getChannelCount() < 1) {
969             return false;
970         }
971 
972         // Mode must be streaming
973         if (mode != MODE_STREAM) {
974             return false;
975         }
976 
977         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
978         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
979         if (bufferSizeInBytes != 0) {
980             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
981             final int MILLIS_PER_SECOND = 1000;
982             final long bufferTargetSize =
983                     BUFFER_TARGET_MODE_STREAM_MS
984                     * format.getChannelCount()
985                     * format.getBytesPerSample(format.getEncoding())
986                     * format.getSampleRate()
987                     / MILLIS_PER_SECOND;
988             if (bufferSizeInBytes < bufferTargetSize) {
989                 return false;
990             }
991         }
992 
993         return true;
994     }
995 
996     // Convenience method for the constructor's parameter checks.
997     // This is where constructor IllegalArgumentException-s are thrown
998     // postconditions:
999     //    mChannelCount is valid
1000     //    mChannelMask is valid
1001     //    mAudioFormat is valid
1002     //    mSampleRate is valid
1003     //    mDataLoadMode is valid
audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, int audioFormat, int mode)1004     private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
1005                                  int audioFormat, int mode) {
1006         //--------------
1007         // sample rate, note these values are subject to change
1008         if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
1009                 sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
1010                 sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
1011             throw new IllegalArgumentException(sampleRateInHz
1012                     + "Hz is not a supported sample rate.");
1013         }
1014         mSampleRate = sampleRateInHz;
1015 
1016         // IEC61937 is based on stereo. We could coerce it to stereo.
1017         // But the application needs to know the stream is stereo so that
1018         // it is encoded and played correctly. So better to just reject it.
1019         if (audioFormat == AudioFormat.ENCODING_IEC61937
1020                 && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
1021             throw new IllegalArgumentException(
1022                     "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
1023         }
1024 
1025         //--------------
1026         // channel config
1027         mChannelConfiguration = channelConfig;
1028 
1029         switch (channelConfig) {
1030         case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
1031         case AudioFormat.CHANNEL_OUT_MONO:
1032         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1033             mChannelCount = 1;
1034             mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
1035             break;
1036         case AudioFormat.CHANNEL_OUT_STEREO:
1037         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1038             mChannelCount = 2;
1039             mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
1040             break;
1041         default:
1042             if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
1043                 mChannelCount = 0;
1044                 break; // channel index configuration only
1045             }
1046             if (!isMultichannelConfigSupported(channelConfig)) {
1047                 // input channel configuration features unsupported channels
1048                 throw new IllegalArgumentException("Unsupported channel configuration.");
1049             }
1050             mChannelMask = channelConfig;
1051             mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1052         }
1053         // check the channel index configuration (if present)
1054         mChannelIndexMask = channelIndexMask;
1055         if (mChannelIndexMask != 0) {
1056             // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
1057             final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
1058             if ((channelIndexMask & ~indexMask) != 0) {
1059                 throw new IllegalArgumentException("Unsupported channel index configuration "
1060                         + channelIndexMask);
1061             }
1062             int channelIndexCount = Integer.bitCount(channelIndexMask);
1063             if (mChannelCount == 0) {
1064                  mChannelCount = channelIndexCount;
1065             } else if (mChannelCount != channelIndexCount) {
1066                 throw new IllegalArgumentException("Channel count must match");
1067             }
1068         }
1069 
1070         //--------------
1071         // audio format
1072         if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
1073             audioFormat = AudioFormat.ENCODING_PCM_16BIT;
1074         }
1075 
1076         if (!AudioFormat.isPublicEncoding(audioFormat)) {
1077             throw new IllegalArgumentException("Unsupported audio encoding.");
1078         }
1079         mAudioFormat = audioFormat;
1080 
1081         //--------------
1082         // audio load mode
1083         if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
1084                 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
1085             throw new IllegalArgumentException("Invalid mode.");
1086         }
1087         mDataLoadMode = mode;
1088     }
1089 
1090     /**
1091      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1092      * @param channelConfig the mask to validate
1093      * @return false if the AudioTrack can't be used with such a mask
1094      */
isMultichannelConfigSupported(int channelConfig)1095     private static boolean isMultichannelConfigSupported(int channelConfig) {
1096         // check for unsupported channels
1097         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1098             loge("Channel configuration features unsupported channels");
1099             return false;
1100         }
1101         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1102         if (channelCount > CHANNEL_COUNT_MAX) {
1103             loge("Channel configuration contains too many channels " +
1104                     channelCount + ">" + CHANNEL_COUNT_MAX);
1105             return false;
1106         }
1107         // check for unsupported multichannel combinations:
1108         // - FL/FR must be present
1109         // - L/R channels must be paired (e.g. no single L channel)
1110         final int frontPair =
1111                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1112         if ((channelConfig & frontPair) != frontPair) {
1113                 loge("Front channels must be present in multichannel configurations");
1114                 return false;
1115         }
1116         final int backPair =
1117                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
1118         if ((channelConfig & backPair) != 0) {
1119             if ((channelConfig & backPair) != backPair) {
1120                 loge("Rear channels can't be used independently");
1121                 return false;
1122             }
1123         }
1124         final int sidePair =
1125                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1126         if ((channelConfig & sidePair) != 0
1127                 && (channelConfig & sidePair) != sidePair) {
1128             loge("Side channels can't be used independently");
1129             return false;
1130         }
1131         return true;
1132     }
1133 
1134 
1135     // Convenience method for the constructor's audio buffer size check.
1136     // preconditions:
1137     //    mChannelCount is valid
1138     //    mAudioFormat is valid
1139     // postcondition:
1140     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1141     private void audioBuffSizeCheck(int audioBufferSize) {
1142         // NB: this section is only valid with PCM or IEC61937 data.
1143         //     To update when supporting compressed formats
1144         int frameSizeInBytes;
1145         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1146             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1147         } else {
1148             frameSizeInBytes = 1;
1149         }
1150         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1151             throw new IllegalArgumentException("Invalid audio buffer size.");
1152         }
1153 
1154         mNativeBufferSizeInBytes = audioBufferSize;
1155         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1156     }
1157 
1158 
1159     /**
1160      * Releases the native AudioTrack resources.
1161      */
release()1162     public void release() {
1163         // even though native_release() stops the native AudioTrack, we need to stop
1164         // AudioTrack subclasses too.
1165         try {
1166             stop();
1167         } catch(IllegalStateException ise) {
1168             // don't raise an exception, we're releasing the resources.
1169         }
1170         baseRelease();
1171         native_release();
1172         mState = STATE_UNINITIALIZED;
1173     }
1174 
    // Safety net in case the app never called release(): free the player base
    // and the native resources. NOTE(review): unlike release(), this does not
    // call stop() — presumably native_finalize() tears down the native track.
    @Override
    protected void finalize() {
        baseRelease();
        native_finalize();
    }
1180 
1181     //--------------------------------------------------------------------------
1182     // Getters
1183     //--------------------
1184     /**
1185      * Returns the minimum gain value, which is the constant 0.0.
1186      * Gain values less than 0.0 will be clamped to 0.0.
1187      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1188      * @return the minimum value, which is the constant 0.0.
1189      */
getMinVolume()1190     static public float getMinVolume() {
1191         return GAIN_MIN;
1192     }
1193 
1194     /**
1195      * Returns the maximum gain value, which is greater than or equal to 1.0.
1196      * Gain values greater than the maximum will be clamped to the maximum.
1197      * <p>The word "volume" in the API name is historical; this is actually a gain.
1198      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1199      * corresponds to a gain of 0 dB (sample values left unmodified).
1200      * @return the maximum value, which is greater than or equal to 1.0.
1201      */
getMaxVolume()1202     static public float getMaxVolume() {
1203         return GAIN_MAX;
1204     }
1205 
    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source.
     */
    public int getSampleRate() {
        return mSampleRate;
    }
1218 
    /**
     * Returns the current playback sample rate in Hz, as reported by the native layer.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
1225 
    /**
     * Returns the current playback parameters, as reported by the native layer.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters.
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }
1235 
    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
1243 
    /**
     * Returns the volume stream type of this AudioTrack.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
     * {@link AudioManager#STREAM_ACCESSIBILITY}.
     */
    public int getStreamType() {
        return mStreamType;
    }
1255 
    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
1268 
1269     /**
1270      * Returns the configured <code>AudioTrack</code> format.
1271      * @return an {@link AudioFormat} containing the
1272      * <code>AudioTrack</code> parameters at the time of configuration.
1273      */
getFormat()1274     public @NonNull AudioFormat getFormat() {
1275         AudioFormat.Builder builder = new AudioFormat.Builder()
1276             .setSampleRate(mSampleRate)
1277             .setEncoding(mAudioFormat);
1278         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
1279             builder.setChannelMask(mChannelConfiguration);
1280         }
1281         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
1282             builder.setChannelIndexMask(mChannelIndexMask);
1283         }
1284         return builder.build();
1285     }
1286 
1287     /**
1288      * Returns the configured number of channels.
1289      */
getChannelCount()1290     public int getChannelCount() {
1291         return mChannelCount;
1292     }
1293 
1294     /**
1295      * Returns the state of the AudioTrack instance. This is useful after the
1296      * AudioTrack instance has been created to check if it was initialized
1297      * properly. This ensures that the appropriate resources have been acquired.
1298      * @see #STATE_UNINITIALIZED
1299      * @see #STATE_INITIALIZED
1300      * @see #STATE_NO_STATIC_DATA
1301      */
getState()1302     public int getState() {
1303         return mState;
1304     }
1305 
1306     /**
1307      * Returns the playback state of the AudioTrack instance.
1308      * @see #PLAYSTATE_STOPPED
1309      * @see #PLAYSTATE_PAUSED
1310      * @see #PLAYSTATE_PLAYING
1311      */
getPlayState()1312     public int getPlayState() {
1313         synchronized (mPlayStateLock) {
1314             return mPlayState;
1315         }
1316     }
1317 
1318 
1319     /**
1320      * Returns the effective size of the <code>AudioTrack</code> buffer
1321      * that the application writes to.
1322      * <p> This will be less than or equal to the result of
1323      * {@link #getBufferCapacityInFrames()}.
1324      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
1325      * <p> If the track is subsequently routed to a different output sink, the buffer
1326      * size and capacity may enlarge to accommodate.
1327      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1328      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1329      * the size of the <code>AudioTrack</code> buffer in bytes.
1330      * <p> See also {@link AudioManager#getProperty(String)} for key
1331      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1332      * @return current size in frames of the <code>AudioTrack</code> buffer.
1333      * @throws IllegalStateException if track is not initialized.
1334      */
getBufferSizeInFrames()1335     public int getBufferSizeInFrames() {
1336         return native_get_buffer_size_frames();
1337     }
1338 
1339     /**
1340      * Limits the effective size of the <code>AudioTrack</code> buffer
1341      * that the application writes to.
1342      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
1343      * If a blocking write is used then the write will block until the data
1344      * can fit within this limit.
1345      * <p>Changing this limit modifies the latency associated with
1346      * the buffer for this track. A smaller size will give lower latency
1347      * but there may be more glitches due to buffer underruns.
1348      * <p>The actual size used may not be equal to this requested size.
1349      * It will be limited to a valid range with a maximum of
1350      * {@link #getBufferCapacityInFrames()}.
1351      * It may also be adjusted slightly for internal reasons.
1352      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
1353      * will be returned.
1354      * <p>This method is only supported for PCM audio.
1355      * It is not supported for compressed audio tracks.
1356      *
1357      * @param bufferSizeInFrames requested buffer size in frames
1358      * @return the actual buffer size in frames or an error code,
1359      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1360      * @throws IllegalStateException if track is not initialized.
1361      */
setBufferSizeInFrames(int bufferSizeInFrames)1362     public int setBufferSizeInFrames(int bufferSizeInFrames) {
1363         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
1364             return ERROR_INVALID_OPERATION;
1365         }
1366         if (bufferSizeInFrames < 0) {
1367             return ERROR_BAD_VALUE;
1368         }
1369         return native_set_buffer_size_frames(bufferSizeInFrames);
1370     }
1371 
1372     /**
1373      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
1374      *  <p> If the track's creation mode is {@link #MODE_STATIC},
1375      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
1376      *  A static track's frame count will not change.
1377      *  <p> If the track's creation mode is {@link #MODE_STREAM},
1378      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
1379      *  For streaming tracks, this value may be rounded up to a larger value if needed by
1380      *  the target output sink, and
1381      *  if the track is subsequently routed to a different output sink, the
1382      *  frame count may enlarge to accommodate.
1383      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1384      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1385      *  the size of the <code>AudioTrack</code> buffer in bytes.
1386      *  <p> See also {@link AudioManager#getProperty(String)} for key
1387      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1388      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
1389      *  @throws IllegalStateException if track is not initialized.
1390      */
getBufferCapacityInFrames()1391     public int getBufferCapacityInFrames() {
1392         return native_get_buffer_capacity_frames();
1393     }
1394 
1395     /**
1396      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
1397      *  @return current size in frames of the <code>AudioTrack</code> buffer.
1398      *  @throws IllegalStateException
1399      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
1400      */
1401     @Deprecated
getNativeFrameCount()1402     protected int getNativeFrameCount() {
1403         return native_get_buffer_capacity_frames();
1404     }
1405 
1406     /**
1407      * Returns marker position expressed in frames.
1408      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
1409      * or zero if marker is disabled.
1410      */
getNotificationMarkerPosition()1411     public int getNotificationMarkerPosition() {
1412         return native_get_marker_pos();
1413     }
1414 
1415     /**
1416      * Returns the notification update period expressed in frames.
1417      * Zero means that no position update notifications are being delivered.
1418      */
getPositionNotificationPeriod()1419     public int getPositionNotificationPeriod() {
1420         return native_get_pos_update_period();
1421     }
1422 
1423     /**
1424      * Returns the playback head position expressed in frames.
1425      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
1426      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
1427      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
1428      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
1429      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
1430      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
1431      * the total number of frames played since reset,
1432      * <i>not</i> the current offset within the buffer.
1433      */
getPlaybackHeadPosition()1434     public int getPlaybackHeadPosition() {
1435         return native_get_position();
1436     }
1437 
1438     /**
1439      * Returns this track's estimated latency in milliseconds. This includes the latency due
1440      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
1441      *
1442      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
1443      * a better solution.
1444      * @hide
1445      */
getLatency()1446     public int getLatency() {
1447         return native_get_latency();
1448     }
1449 
1450     /**
1451      * Returns the number of underrun occurrences in the application-level write buffer
1452      * since the AudioTrack was created.
1453      * An underrun occurs if the application does not write audio
1454      * data quickly enough, causing the buffer to underflow
1455      * and a potential audio glitch or pop.
1456      * <p>
1457      * Underruns are less likely when buffer sizes are large.
1458      * It may be possible to eliminate underruns by recreating the AudioTrack with
1459      * a larger buffer.
1460      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
1461      * effective size of the buffer.
1462      */
getUnderrunCount()1463     public int getUnderrunCount() {
1464         return native_get_underrun_count();
1465     }
1466 
1467     /**
1468      * Returns the current performance mode of the {@link AudioTrack}.
1469      *
1470      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
1471      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1472      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1473      * Use {@link AudioTrack.Builder#setPerformanceMode}
1474      * in the {@link AudioTrack.Builder} to enable a performance mode.
1475      * @throws IllegalStateException if track is not initialized.
1476      */
getPerformanceMode()1477     public @PerformanceMode int getPerformanceMode() {
1478         final int flags = native_get_flags();
1479         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
1480             return PERFORMANCE_MODE_LOW_LATENCY;
1481         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
1482             return PERFORMANCE_MODE_POWER_SAVING;
1483         } else {
1484             return PERFORMANCE_MODE_NONE;
1485         }
1486     }
1487 
1488     /**
1489      *  Returns the output sample rate in Hz for the specified stream type.
1490      */
getNativeOutputSampleRate(int streamType)1491     static public int getNativeOutputSampleRate(int streamType) {
1492         return native_get_output_sample_rate(streamType);
1493     }
1494 
1495     /**
1496      * Returns the estimated minimum buffer size required for an AudioTrack
1497      * object to be created in the {@link #MODE_STREAM} mode.
1498      * The size is an estimate because it does not consider either the route or the sink,
1499      * since neither is known yet.  Note that this size doesn't
1500      * guarantee a smooth playback under load, and higher values should be chosen according to
1501      * the expected frequency at which the buffer will be refilled with additional data to play.
1502      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
1503      * to a higher value than the initial source sample rate, be sure to configure the buffer size
1504      * based on the highest planned sample rate.
1505      * @param sampleRateInHz the source sample rate expressed in Hz.
1506      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
1507      * @param channelConfig describes the configuration of the audio channels.
1508      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
1509      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
1510      * @param audioFormat the format in which the audio data is represented.
1511      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
1512      *   {@link AudioFormat#ENCODING_PCM_8BIT},
1513      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1514      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
1515      *   or {@link #ERROR} if unable to query for output properties,
1516      *   or the minimum buffer size expressed in bytes.
1517      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)1518     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
1519         int channelCount = 0;
1520         switch(channelConfig) {
1521         case AudioFormat.CHANNEL_OUT_MONO:
1522         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1523             channelCount = 1;
1524             break;
1525         case AudioFormat.CHANNEL_OUT_STEREO:
1526         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1527             channelCount = 2;
1528             break;
1529         default:
1530             if (!isMultichannelConfigSupported(channelConfig)) {
1531                 loge("getMinBufferSize(): Invalid channel configuration.");
1532                 return ERROR_BAD_VALUE;
1533             } else {
1534                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1535             }
1536         }
1537 
1538         if (!AudioFormat.isPublicEncoding(audioFormat)) {
1539             loge("getMinBufferSize(): Invalid audio format.");
1540             return ERROR_BAD_VALUE;
1541         }
1542 
1543         // sample rate, note these values are subject to change
1544         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
1545         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
1546                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
1547             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
1548             return ERROR_BAD_VALUE;
1549         }
1550 
1551         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
1552         if (size <= 0) {
1553             loge("getMinBufferSize(): error querying hardware");
1554             return ERROR;
1555         }
1556         else {
1557             return size;
1558         }
1559     }
1560 
1561     /**
1562      * Returns the audio session ID.
1563      *
1564      * @return the ID of the audio session this AudioTrack belongs to.
1565      */
getAudioSessionId()1566     public int getAudioSessionId() {
1567         return mSessionId;
1568     }
1569 
1570    /**
1571     * Poll for a timestamp on demand.
1572     * <p>
1573     * If you need to track timestamps during initial warmup or after a routing or mode change,
1574     * you should request a new timestamp periodically until the reported timestamps
1575     * show that the frame position is advancing, or until it becomes clear that
1576     * timestamps are unavailable for this route.
1577     * <p>
1578     * After the clock is advancing at a stable rate,
1579     * query for a new timestamp approximately once every 10 seconds to once per minute.
1580     * Calling this method more often is inefficient.
1581     * It is also counter-productive to call this method more often than recommended,
1582     * because the short-term differences between successive timestamp reports are not meaningful.
1583     * If you need a high-resolution mapping between frame position and presentation time,
1584     * consider implementing that at application level, based on low-resolution timestamps.
1585     * <p>
1586     * The audio data at the returned position may either already have been
1587     * presented, or may have not yet been presented but is committed to be presented.
1588     * It is not possible to request the time corresponding to a particular position,
1589     * or to request the (fractional) position corresponding to a particular time.
1590     * If you need such features, consider implementing them at application level.
1591     *
1592     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1593     *        and owned by caller.
1594     * @return true if a timestamp is available, or false if no timestamp is available.
1595     *         If a timestamp if available,
1596     *         the AudioTimestamp instance is filled in with a position in frame units, together
1597     *         with the estimated time when that frame was presented or is committed to
1598     *         be presented.
1599     *         In the case that no timestamp is available, any supplied instance is left unaltered.
1600     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
1601     *         or during and immediately after a route change.
1602     *         A timestamp is permanently unavailable for a given route if the route does not support
1603     *         timestamps.  In this case, the approximate frame position can be obtained
1604     *         using {@link #getPlaybackHeadPosition}.
1605     *         However, it may be useful to continue to query for
1606     *         timestamps occasionally, to recover after a route change.
1607     */
1608     // Add this text when the "on new timestamp" API is added:
1609     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)1610     public boolean getTimestamp(AudioTimestamp timestamp)
1611     {
1612         if (timestamp == null) {
1613             throw new IllegalArgumentException();
1614         }
1615         // It's unfortunate, but we have to either create garbage every time or use synchronized
1616         long[] longArray = new long[2];
1617         int ret = native_get_timestamp(longArray);
1618         if (ret != SUCCESS) {
1619             return false;
1620         }
1621         timestamp.framePosition = longArray[0];
1622         timestamp.nanoTime = longArray[1];
1623         return true;
1624     }
1625 
1626     /**
1627      * Poll for a timestamp on demand.
1628      * <p>
1629      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
1630      *
1631      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1632      *        and owned by caller.
1633      * @return {@link #SUCCESS} if a timestamp is available
1634      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
1635      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
1636      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
1637      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
1638      *         for the timestamp.
1639      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1640      *         needs to be recreated.
1641      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
1642      *         timestamps. In this case, the approximate frame position can be obtained
1643      *         using {@link #getPlaybackHeadPosition}.
1644      *
1645      *         The AudioTimestamp instance is filled in with a position in frame units, together
1646      *         with the estimated time when that frame was presented or is committed to
1647      *         be presented.
1648      * @hide
1649      */
1650      // Add this text when the "on new timestamp" API is added:
1651      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)1652      public int getTimestampWithStatus(AudioTimestamp timestamp)
1653      {
1654          if (timestamp == null) {
1655              throw new IllegalArgumentException();
1656          }
1657          // It's unfortunate, but we have to either create garbage every time or use synchronized
1658          long[] longArray = new long[2];
1659          int ret = native_get_timestamp(longArray);
1660          timestamp.framePosition = longArray[0];
1661          timestamp.nanoTime = longArray[1];
1662          return ret;
1663      }
1664 
1665     //--------------------------------------------------------------------------
1666     // Initialization / configuration
1667     //--------------------
1668     /**
1669      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1670      * for each periodic playback head position update.
1671      * Notifications will be received in the same thread as the one in which the AudioTrack
1672      * instance was created.
1673      * @param listener
1674      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)1675     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
1676         setPlaybackPositionUpdateListener(listener, null);
1677     }
1678 
1679     /**
1680      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1681      * for each periodic playback head position update.
1682      * Use this method to receive AudioTrack events in the Handler associated with another
1683      * thread than the one in which you created the AudioTrack instance.
1684      * @param listener
1685      * @param handler the Handler that will receive the event notification messages.
1686      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)1687     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
1688                                                     Handler handler) {
1689         if (listener != null) {
1690             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
1691         } else {
1692             mEventHandlerDelegate = null;
1693         }
1694     }
1695 
1696 
clampGainOrLevel(float gainOrLevel)1697     private static float clampGainOrLevel(float gainOrLevel) {
1698         if (Float.isNaN(gainOrLevel)) {
1699             throw new IllegalArgumentException();
1700         }
1701         if (gainOrLevel < GAIN_MIN) {
1702             gainOrLevel = GAIN_MIN;
1703         } else if (gainOrLevel > GAIN_MAX) {
1704             gainOrLevel = GAIN_MAX;
1705         }
1706         return gainOrLevel;
1707     }
1708 
1709 
1710      /**
1711      * Sets the specified left and right output gain values on the AudioTrack.
1712      * <p>Gain values are clamped to the closed interval [0.0, max] where
1713      * max is the value of {@link #getMaxVolume}.
1714      * A value of 0.0 results in zero gain (silence), and
1715      * a value of 1.0 means unity gain (signal unchanged).
1716      * The default value is 1.0 meaning unity gain.
1717      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1718      * @param leftGain output gain for the left channel.
1719      * @param rightGain output gain for the right channel
1720      * @return error code or success, see {@link #SUCCESS},
1721      *    {@link #ERROR_INVALID_OPERATION}
1722      * @deprecated Applications should use {@link #setVolume} instead, as it
1723      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1724      */
1725     @Deprecated
setStereoVolume(float leftGain, float rightGain)1726     public int setStereoVolume(float leftGain, float rightGain) {
1727         if (mState == STATE_UNINITIALIZED) {
1728             return ERROR_INVALID_OPERATION;
1729         }
1730 
1731         baseSetVolume(leftGain, rightGain);
1732         return SUCCESS;
1733     }
1734 
1735     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)1736     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
1737         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
1738         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
1739 
1740         native_setVolume(leftVolume, rightVolume);
1741     }
1742 
1743 
1744     /**
1745      * Sets the specified output gain value on all channels of this track.
1746      * <p>Gain values are clamped to the closed interval [0.0, max] where
1747      * max is the value of {@link #getMaxVolume}.
1748      * A value of 0.0 results in zero gain (silence), and
1749      * a value of 1.0 means unity gain (signal unchanged).
1750      * The default value is 1.0 meaning unity gain.
1751      * <p>This API is preferred over {@link #setStereoVolume}, as it
1752      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1753      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1754      * @param gain output gain for all channels.
1755      * @return error code or success, see {@link #SUCCESS},
1756      *    {@link #ERROR_INVALID_OPERATION}
1757      */
setVolume(float gain)1758     public int setVolume(float gain) {
1759         return setStereoVolume(gain, gain);
1760     }
1761 
1762     @Override
playerApplyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)1763     /* package */ int playerApplyVolumeShaper(
1764             @NonNull VolumeShaper.Configuration configuration,
1765             @NonNull VolumeShaper.Operation operation) {
1766         return native_applyVolumeShaper(configuration, operation);
1767     }
1768 
1769     @Override
playerGetVolumeShaperState(int id)1770     /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
1771         return native_getVolumeShaperState(id);
1772     }
1773 
1774     @Override
createVolumeShaper( @onNull VolumeShaper.Configuration configuration)1775     public @NonNull VolumeShaper createVolumeShaper(
1776             @NonNull VolumeShaper.Configuration configuration) {
1777         return new VolumeShaper(configuration, this);
1778     }
1779 
1780     /**
1781      * Sets the playback sample rate for this track. This sets the sampling rate at which
1782      * the audio data will be consumed and played back
1783      * (as set by the sampleRateInHz parameter in the
1784      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
1785      * not the original sampling rate of the
1786      * content. For example, setting it to half the sample rate of the content will cause the
1787      * playback to last twice as long, but will also result in a pitch shift down by one octave.
1788      * The valid sample rate range is from 1 Hz to twice the value returned by
1789      * {@link #getNativeOutputSampleRate(int)}.
1790      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
1791      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
1792      * for playback of content of differing sample rate,
1793      * but with identical encoding and channel mask.
1794      * @param sampleRateInHz the sample rate expressed in Hz
1795      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1796      *    {@link #ERROR_INVALID_OPERATION}
1797      */
setPlaybackRate(int sampleRateInHz)1798     public int setPlaybackRate(int sampleRateInHz) {
1799         if (mState != STATE_INITIALIZED) {
1800             return ERROR_INVALID_OPERATION;
1801         }
1802         if (sampleRateInHz <= 0) {
1803             return ERROR_BAD_VALUE;
1804         }
1805         return native_set_playback_rate(sampleRateInHz);
1806     }
1807 
1808 
1809     /**
1810      * Sets the playback parameters.
1811      * This method returns failure if it cannot apply the playback parameters.
1812      * One possible cause is that the parameters for speed or pitch are out of range.
1813      * Another possible cause is that the <code>AudioTrack</code> is streaming
1814      * (see {@link #MODE_STREAM}) and the
1815      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
1816      * on configuration must be larger than the speed multiplied by the minimum size
1817      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
1818      * @param params see {@link PlaybackParams}. In particular,
1819      * speed, pitch, and audio mode should be set.
1820      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
1821      * @throws IllegalStateException if track is not initialized.
1822      */
setPlaybackParams(@onNull PlaybackParams params)1823     public void setPlaybackParams(@NonNull PlaybackParams params) {
1824         if (params == null) {
1825             throw new IllegalArgumentException("params is null");
1826         }
1827         native_set_playback_params(params);
1828     }
1829 
1830 
1831     /**
1832      * Sets the position of the notification marker.  At most one marker can be active.
1833      * @param markerInFrames marker position in wrapping frame units similar to
1834      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
1835      * To set a marker at a position which would appear as zero due to wraparound,
1836      * a workaround is to use a non-zero position near zero, such as -1 or 1.
1837      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1838      *  {@link #ERROR_INVALID_OPERATION}
1839      */
setNotificationMarkerPosition(int markerInFrames)1840     public int setNotificationMarkerPosition(int markerInFrames) {
1841         if (mState == STATE_UNINITIALIZED) {
1842             return ERROR_INVALID_OPERATION;
1843         }
1844         return native_set_marker_pos(markerInFrames);
1845     }
1846 
1847 
1848     /**
1849      * Sets the period for the periodic notification event.
1850      * @param periodInFrames update period expressed in frames.
1851      * Zero period means no position updates.  A negative period is not allowed.
1852      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
1853      */
setPositionNotificationPeriod(int periodInFrames)1854     public int setPositionNotificationPeriod(int periodInFrames) {
1855         if (mState == STATE_UNINITIALIZED) {
1856             return ERROR_INVALID_OPERATION;
1857         }
1858         return native_set_pos_update_period(periodInFrames);
1859     }
1860 
1861 
1862     /**
1863      * Sets the playback head position within the static buffer.
1864      * The track must be stopped or paused for the position to be changed,
1865      * and must use the {@link #MODE_STATIC} mode.
1866      * @param positionInFrames playback head position within buffer, expressed in frames.
1867      * Zero corresponds to start of buffer.
1868      * The position must not be greater than the buffer size in frames, or negative.
1869      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
1870      * the position values have different meanings.
1871      * <br>
1872      * If looping is currently enabled and the new position is greater than or equal to the
1873      * loop end marker, the behavior varies by API level:
1874      * as of {@link android.os.Build.VERSION_CODES#M},
1875      * the looping is first disabled and then the position is set.
1876      * For earlier API levels, the behavior is unspecified.
1877      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1878      *    {@link #ERROR_INVALID_OPERATION}
1879      */
setPlaybackHeadPosition(int positionInFrames)1880     public int setPlaybackHeadPosition(int positionInFrames) {
1881         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1882                 getPlayState() == PLAYSTATE_PLAYING) {
1883             return ERROR_INVALID_OPERATION;
1884         }
1885         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1886             return ERROR_BAD_VALUE;
1887         }
1888         return native_set_position(positionInFrames);
1889     }
1890 
1891     /**
1892      * Sets the loop points and the loop count. The loop can be infinite.
1893      * Similarly to setPlaybackHeadPosition,
1894      * the track must be stopped or paused for the loop points to be changed,
1895      * and must use the {@link #MODE_STATIC} mode.
1896      * @param startInFrames loop start marker expressed in frames.
1897      * Zero corresponds to start of buffer.
1898      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1899      * @param endInFrames loop end marker expressed in frames.
1900      * The total buffer size in frames corresponds to end of buffer.
1901      * The end marker must not be greater than the buffer size in frames.
1902      * For looping, the end marker must not be less than or equal to the start marker,
1903      * but to disable looping
1904      * it is permitted for start marker, end marker, and loop count to all be 0.
1905      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
1906      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
1907      * support,
1908      * {@link #ERROR_BAD_VALUE} is returned.
1909      * The loop range is the interval [startInFrames, endInFrames).
1910      * <br>
1911      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
1912      * unless it is greater than or equal to the loop end marker, in which case
1913      * it is forced to the loop start marker.
1914      * For earlier API levels, the effect on position is unspecified.
1915      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
1916      *    A value of -1 means infinite looping, and 0 disables looping.
1917      *    A value of positive N means to "loop" (go back) N times.  For example,
1918      *    a value of one means to play the region two times in total.
1919      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1920      *    {@link #ERROR_INVALID_OPERATION}
1921      */
setLoopPoints(int startInFrames, int endInFrames, int loopCount)1922     public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1923         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1924                 getPlayState() == PLAYSTATE_PLAYING) {
1925             return ERROR_INVALID_OPERATION;
1926         }
1927         if (loopCount == 0) {
1928             ;   // explicitly allowed as an exception to the loop region range check
1929         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1930                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1931             return ERROR_BAD_VALUE;
1932         }
1933         return native_set_loop(startInFrames, endInFrames, loopCount);
1934     }
1935 
    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Direct, unvalidated write; races with readers of mState elsewhere are
        // tolerated by design (see the "benign race" notes in the write() methods).
        mState = state;
    }
1947 
1948 
1949     //---------------------------------------------------------
1950     // Transport control methods
1951     //--------------------
1952     /**
1953      * Starts playing an AudioTrack.
1954      * <p>
1955      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
1956      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
1957      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
1958      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
1959      * play().
1960      * <p>
1961      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
1962      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
1963      * If you don't call write() first, or if you call write() but with an insufficient amount of
1964      * data, then the track will be in underrun state at play().  In this case,
1965      * playback will not actually start playing until the data path is filled to a
1966      * device-specific minimum level.  This requirement for the path to be filled
1967      * to a minimum level is also true when resuming audio playback after calling stop().
1968      * Similarly the buffer will need to be filled up again after
1969      * the track underruns due to failure to call write() in a timely manner with sufficient data.
1970      * For portability, an application should prime the data path to the maximum allowed
1971      * by writing data until the write() method returns a short transfer count.
1972      * This allows play() to start immediately, and reduces the chance of underrun.
1973      *
1974      * @throws IllegalStateException if the track isn't properly initialized
1975      */
play()1976     public void play()
1977     throws IllegalStateException {
1978         if (mState != STATE_INITIALIZED) {
1979             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1980         }
1981         //FIXME use lambda to pass startImpl to superclass
1982         final int delay = getStartDelayMs();
1983         if (delay == 0) {
1984             startImpl();
1985         } else {
1986             new Thread() {
1987                 public void run() {
1988                     try {
1989                         Thread.sleep(delay);
1990                     } catch (InterruptedException e) {
1991                         e.printStackTrace();
1992                     }
1993                     baseSetStartDelayMs(0);
1994                     try {
1995                         startImpl();
1996                     } catch (IllegalStateException e) {
1997                         // fail silently for a state exception when it is happening after
1998                         // a delayed start, as the player state could have changed between the
1999                         // call to start() and the execution of startImpl()
2000                     }
2001                 }
2002             }.start();
2003         }
2004     }
2005 
    // Performs the actual start under mPlayStateLock: updates the shared player
    // base state first, then starts the native track, then publishes
    // PLAYSTATE_PLAYING so getPlayState() observers see a consistent transition.
    private void startImpl() {
        synchronized(mPlayStateLock) {
            baseStart();
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
2013 
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            // Native stop first, then base-player bookkeeping, then publish the
            // new play state — mirrors startImpl()'s ordering.
            native_stop();
            baseStop();
            mPlayState = PLAYSTATE_STOPPED;
            // Reset A/V sync header tracking so a subsequent write starts clean.
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
        }
    }
2037 
    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // pause playback
        synchronized(mPlayStateLock) {
            // Same ordering as stop(): native call, base-player bookkeeping,
            // then publish the new play state under the lock.
            native_pause();
            basePause();
            mPlayState = PLAYSTATE_PAUSED;
        }
    }
2058 
2059 
2060     //---------------------------------------------------------
2061     // Audio data supply
2062     //--------------------
2063 
2064     /**
2065      * Flushes the audio data currently queued for playback. Any data that has
2066      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2067      * or if the track's creation mode is not {@link #MODE_STREAM}.
2068      * <BR> Note that although data written but not yet presented is discarded, there is no
2069      * guarantee that all of the buffer space formerly used by that data
2070      * is available for a subsequent write.
2071      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2072      * less than or equal to the total buffer size
2073      * may return a short actual transfer count.
2074      */
flush()2075     public void flush() {
2076         if (mState == STATE_INITIALIZED) {
2077             // flush the data in native layer
2078             native_flush();
2079             mAvSyncHeader = null;
2080             mAvSyncBytesRemaining = 0;
2081         }
2082 
2083     }
2084 
    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
     * <p>
     * In streaming mode, the write will normally block until all the data has been enqueued for
     * playback, and will return a full transfer count.  However, if the track is stopped or paused
     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
     *    starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInBytes the number of bytes to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes. The number of bytes will be a multiple of the frame size in bytes
     *    not to exceed sizeInBytes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
     * set to  {@link #WRITE_BLOCKING}.
     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Convenience overload: delegate to the 4-arg variant in blocking mode.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
2123 
2124     /**
2125      * Writes the audio data to the audio sink for playback (streaming mode),
2126      * or copies audio data for later playback (static buffer mode).
2127      * The format specified in the AudioTrack constructor should be
2128      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2129      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2130      * <p>
2131      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2132      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2133      * for playback, and will return a full transfer count.  However, if the write mode is
2134      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2135      * interrupts the write by calling stop or pause, or an I/O error
2136      * occurs during the write, then the write may return a short transfer count.
2137      * <p>
2138      * In static buffer mode, copies the data to the buffer starting at offset 0,
2139      * and the write mode is ignored.
2140      * Note that the actual playback of this data might occur after this function returns.
2141      *
2142      * @param audioData the array that holds the data to play.
2143      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2144      *    starts.
2145      *    Must not be negative, or cause the data access to go out of bounds of the array.
2146      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2147      *    Must not be negative, or cause the data access to go out of bounds of the array.
2148      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2149      *     effect in static mode.
2150      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2151      *         to the audio sink.
2152      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2153      *     queuing as much audio data for playback as possible without blocking.
2154      * @return zero or the positive number of bytes that were written, or one of the following
2155      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2156      *    not to exceed sizeInBytes.
2157      * <ul>
2158      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2159      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2160      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2161      *    needs to be recreated. The dead object error code is not returned if some data was
2162      *    successfully transferred. In this case, the error is returned at the next write()</li>
2163      * <li>{@link #ERROR} in case of other error</li>
2164      * </ul>
2165      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)2166     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
2167             @WriteMode int writeMode) {
2168 
2169         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2170             return ERROR_INVALID_OPERATION;
2171         }
2172 
2173         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2174             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2175             return ERROR_BAD_VALUE;
2176         }
2177 
2178         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
2179                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
2180                 || (offsetInBytes + sizeInBytes > audioData.length)) {
2181             return ERROR_BAD_VALUE;
2182         }
2183 
2184         int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
2185                 writeMode == WRITE_BLOCKING);
2186 
2187         if ((mDataLoadMode == MODE_STATIC)
2188                 && (mState == STATE_NO_STATIC_DATA)
2189                 && (ret > 0)) {
2190             // benign race with respect to other APIs that read mState
2191             mState = STATE_INITIALIZED;
2192         }
2193 
2194         return ret;
2195     }
2196 
    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
     * <p>
     * In streaming mode, the write will normally block until all the data has been enqueued for
     * playback, and will return a full transfer count.  However, if the track is stopped or paused
     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
     *     starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to read in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @return zero or the positive number of shorts that were written, or one of the following
     *    error codes. The number of shorts will be a multiple of the channel count not to
     *    exceed sizeInShorts.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
     * set to  {@link #WRITE_BLOCKING}.
     */
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        // Convenience overload: delegate to the 4-arg variant in blocking mode.
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }
2234 
2235     /**
2236      * Writes the audio data to the audio sink for playback (streaming mode),
2237      * or copies audio data for later playback (static buffer mode).
2238      * The format specified in the AudioTrack constructor should be
2239      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2240      * <p>
2241      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2242      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2243      * for playback, and will return a full transfer count.  However, if the write mode is
2244      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2245      * interrupts the write by calling stop or pause, or an I/O error
2246      * occurs during the write, then the write may return a short transfer count.
2247      * <p>
2248      * In static buffer mode, copies the data to the buffer starting at offset 0.
2249      * Note that the actual playback of this data might occur after this function returns.
2250      *
2251      * @param audioData the array that holds the data to write.
2252      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
2253      *     starts.
2254      *    Must not be negative, or cause the data access to go out of bounds of the array.
2255      * @param sizeInShorts the number of shorts to read in audioData after the offset.
2256      *    Must not be negative, or cause the data access to go out of bounds of the array.
2257      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2258      *     effect in static mode.
2259      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2260      *         to the audio sink.
2261      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2262      *     queuing as much audio data for playback as possible without blocking.
2263      * @return zero or the positive number of shorts that were written, or one of the following
2264      *    error codes. The number of shorts will be a multiple of the channel count not to
2265      *    exceed sizeInShorts.
2266      * <ul>
2267      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2268      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2269      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2270      *    needs to be recreated. The dead object error code is not returned if some data was
2271      *    successfully transferred. In this case, the error is returned at the next write()</li>
2272      * <li>{@link #ERROR} in case of other error</li>
2273      * </ul>
2274      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)2275     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
2276             @WriteMode int writeMode) {
2277 
2278         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2279             return ERROR_INVALID_OPERATION;
2280         }
2281 
2282         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2283             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2284             return ERROR_BAD_VALUE;
2285         }
2286 
2287         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
2288                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
2289                 || (offsetInShorts + sizeInShorts > audioData.length)) {
2290             return ERROR_BAD_VALUE;
2291         }
2292 
2293         int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
2294                 writeMode == WRITE_BLOCKING);
2295 
2296         if ((mDataLoadMode == MODE_STATIC)
2297                 && (mState == STATE_NO_STATIC_DATA)
2298                 && (ret > 0)) {
2299             // benign race with respect to other APIs that read mState
2300             mState = STATE_INITIALIZED;
2301         }
2302 
2303         return ret;
2304     }
2305 
2306     /**
2307      * Writes the audio data to the audio sink for playback (streaming mode),
2308      * or copies audio data for later playback (static buffer mode).
2309      * The format specified in the AudioTrack constructor should be
2310      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
2311      * <p>
2312      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2313      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2314      * for playback, and will return a full transfer count.  However, if the write mode is
2315      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2316      * interrupts the write by calling stop or pause, or an I/O error
2317      * occurs during the write, then the write may return a short transfer count.
2318      * <p>
2319      * In static buffer mode, copies the data to the buffer starting at offset 0,
2320      * and the write mode is ignored.
2321      * Note that the actual playback of this data might occur after this function returns.
2322      *
2323      * @param audioData the array that holds the data to write.
2324      *     The implementation does not clip for sample values within the nominal range
2325      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
2326      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
2327      *     that could add energy, such as reverb.  For the convenience of applications
2328      *     that compute samples using filters with non-unity gain,
2329      *     sample values +3 dB beyond the nominal range are permitted.
2330      *     However such values may eventually be limited or clipped, depending on various gains
2331      *     and later processing in the audio path.  Therefore applications are encouraged
2332      *     to provide samples values within the nominal range.
2333      * @param offsetInFloats the offset, expressed as a number of floats,
2334      *     in audioData where the data to write starts.
2335      *    Must not be negative, or cause the data access to go out of bounds of the array.
2336      * @param sizeInFloats the number of floats to write in audioData after the offset.
2337      *    Must not be negative, or cause the data access to go out of bounds of the array.
2338      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2339      *     effect in static mode.
2340      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2341      *         to the audio sink.
2342      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2343      *     queuing as much audio data for playback as possible without blocking.
2344      * @return zero or the positive number of floats that were written, or one of the following
2345      *    error codes. The number of floats will be a multiple of the channel count not to
2346      *    exceed sizeInFloats.
2347      * <ul>
2348      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2349      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2350      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2351      *    needs to be recreated. The dead object error code is not returned if some data was
2352      *    successfully transferred. In this case, the error is returned at the next write()</li>
2353      * <li>{@link #ERROR} in case of other error</li>
2354      * </ul>
2355      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)2356     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
2357             @WriteMode int writeMode) {
2358 
2359         if (mState == STATE_UNINITIALIZED) {
2360             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2361             return ERROR_INVALID_OPERATION;
2362         }
2363 
2364         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
2365             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
2366             return ERROR_INVALID_OPERATION;
2367         }
2368 
2369         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2370             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2371             return ERROR_BAD_VALUE;
2372         }
2373 
2374         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
2375                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
2376                 || (offsetInFloats + sizeInFloats > audioData.length)) {
2377             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
2378             return ERROR_BAD_VALUE;
2379         }
2380 
2381         int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
2382                 writeMode == WRITE_BLOCKING);
2383 
2384         if ((mDataLoadMode == MODE_STATIC)
2385                 && (mState == STATE_NO_STATIC_DATA)
2386                 && (ret > 0)) {
2387             // benign race with respect to other APIs that read mState
2388             mState = STATE_INITIALIZED;
2389         }
2390 
2391         return ret;
2392     }
2393 
2394 
2395     /**
2396      * Writes the audio data to the audio sink for playback (streaming mode),
2397      * or copies audio data for later playback (static buffer mode).
2398      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
2399      * <p>
2400      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2401      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2402      * for playback, and will return a full transfer count.  However, if the write mode is
2403      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2404      * interrupts the write by calling stop or pause, or an I/O error
2405      * occurs during the write, then the write may return a short transfer count.
2406      * <p>
2407      * In static buffer mode, copies the data to the buffer starting at offset 0,
2408      * and the write mode is ignored.
2409      * Note that the actual playback of this data might occur after this function returns.
2410      *
2411      * @param audioData the buffer that holds the data to write, starting at the position reported
2412      *     by <code>audioData.position()</code>.
2413      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2414      *     have been advanced to reflect the amount of data that was successfully written to
2415      *     the AudioTrack.
2416      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2417      *     that the number of bytes requested be a multiple of the frame size (sample size in
2418      *     bytes multiplied by the channel count).
2419      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2420      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2421      *     effect in static mode.
2422      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2423      *         to the audio sink.
2424      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2425      *     queuing as much audio data for playback as possible without blocking.
2426      * @return zero or the positive number of bytes that were written, or one of the following
2427      *    error codes.
2428      * <ul>
2429      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2430      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2431      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2432      *    needs to be recreated. The dead object error code is not returned if some data was
2433      *    successfully transferred. In this case, the error is returned at the next write()</li>
2434      * <li>{@link #ERROR} in case of other error</li>
2435      * </ul>
2436      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)2437     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
2438             @WriteMode int writeMode) {
2439 
2440         if (mState == STATE_UNINITIALIZED) {
2441             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2442             return ERROR_INVALID_OPERATION;
2443         }
2444 
2445         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2446             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2447             return ERROR_BAD_VALUE;
2448         }
2449 
2450         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
2451             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
2452             return ERROR_BAD_VALUE;
2453         }
2454 
2455         int ret = 0;
2456         if (audioData.isDirect()) {
2457             ret = native_write_native_bytes(audioData,
2458                     audioData.position(), sizeInBytes, mAudioFormat,
2459                     writeMode == WRITE_BLOCKING);
2460         } else {
2461             ret = native_write_byte(NioUtils.unsafeArray(audioData),
2462                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
2463                     sizeInBytes, mAudioFormat,
2464                     writeMode == WRITE_BLOCKING);
2465         }
2466 
2467         if ((mDataLoadMode == MODE_STATIC)
2468                 && (mState == STATE_NO_STATIC_DATA)
2469                 && (ret > 0)) {
2470             // benign race with respect to other APIs that read mState
2471             mState = STATE_INITIALIZED;
2472         }
2473 
2474         if (ret > 0) {
2475             audioData.position(audioData.position() + ret);
2476         }
2477 
2478         return ret;
2479     }
2480 
2481     /**
2482      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
2483      * The blocking behavior will depend on the write mode.
2484      * @param audioData the buffer that holds the data to write, starting at the position reported
2485      *     by <code>audioData.position()</code>.
2486      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2487      *     have been advanced to reflect the amount of data that was successfully written to
2488      *     the AudioTrack.
2489      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2490      *     that the number of bytes requested be a multiple of the frame size (sample size in
2491      *     bytes multiplied by the channel count).
2492      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2493      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
2494      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2495      *         to the audio sink.
2496      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2497      *     queuing as much audio data for playback as possible without blocking.
2498      * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
2499      * @return zero or the positive number of bytes that were written, or one of the following
2500      *    error codes.
2501      * <ul>
2502      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2503      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2504      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2505      *    needs to be recreated. The dead object error code is not returned if some data was
2506      *    successfully transferred. In this case, the error is returned at the next write()</li>
2507      * <li>{@link #ERROR} in case of other error</li>
2508      * </ul>
2509      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only apply to streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without HW A/V sync, fall back to a plain write and drop the timestamp.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // (mOffset is the header length in bytes; 0x55550002 is presumably the
        //  sync-header magic/version understood by the native side — confirm there)
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new access unit: record its size, timestamp and header length
        // via absolute puts, then rewind so the whole header is (re)written first.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // On error, drop the in-flight header state so the next write restarts cleanly.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Partial header write (e.g. non-blocking mode ran out of space):
                // report zero audio bytes consumed so the caller retries later.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data, never beyond the end of the current access unit
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
2583 
2584 
2585     /**
2586      * Sets the playback head position within the static buffer to zero,
2587      * that is it rewinds to start of static buffer.
2588      * The track must be stopped or paused, and
2589      * the track's creation mode must be {@link #MODE_STATIC}.
2590      * <p>
2591      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
2592      * {@link #getPlaybackHeadPosition()} to zero.
2593      * For earlier API levels, the reset behavior is unspecified.
2594      * <p>
2595      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
2596      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
2597      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2598      *  {@link #ERROR_INVALID_OPERATION}
2599      */
reloadStaticData()2600     public int reloadStaticData() {
2601         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
2602             return ERROR_INVALID_OPERATION;
2603         }
2604         return native_reload_static();
2605     }
2606 
2607     //--------------------------------------------------------------------------
2608     // Audio effects management
2609     //--------------------
2610 
2611     /**
2612      * Attaches an auxiliary effect to the audio track. A typical auxiliary
2613      * effect is a reverberation effect which can be applied on any sound source
2614      * that directs a certain amount of its energy to this effect. This amount
2615      * is defined by setAuxEffectSendLevel().
2616      * {@see #setAuxEffectSendLevel(float)}.
2617      * <p>After creating an auxiliary effect (e.g.
2618      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
2619      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
2620      * this method to attach the audio track to the effect.
2621      * <p>To detach the effect from the audio track, call this method with a
2622      * null effect id.
2623      *
2624      * @param effectId system wide unique id of the effect to attach
2625      * @return error code or success, see {@link #SUCCESS},
2626      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
2627      */
attachAuxEffect(int effectId)2628     public int attachAuxEffect(int effectId) {
2629         if (mState == STATE_UNINITIALIZED) {
2630             return ERROR_INVALID_OPERATION;
2631         }
2632         return native_attachAuxEffect(effectId);
2633     }
2634 
2635     /**
2636      * Sets the send level of the audio track to the attached auxiliary effect
2637      * {@link #attachAuxEffect(int)}.  Effect levels
2638      * are clamped to the closed interval [0.0, max] where
2639      * max is the value of {@link #getMaxVolume}.
2640      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
2641      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
2642      * this method must be called for the effect to be applied.
2643      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
2644      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
2645      * so an appropriate conversion from linear UI input x to level is:
2646      * x == 0 -&gt; level = 0
2647      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
2648      *
2649      * @param level linear send level
2650      * @return error code or success, see {@link #SUCCESS},
2651      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
2652      */
setAuxEffectSendLevel(float level)2653     public int setAuxEffectSendLevel(float level) {
2654         if (mState == STATE_UNINITIALIZED) {
2655             return ERROR_INVALID_OPERATION;
2656         }
2657         return baseSetAuxEffectSendLevel(level);
2658     }
2659 
2660     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)2661     int playerSetAuxEffectSendLevel(boolean muting, float level) {
2662         level = clampGainOrLevel(muting ? 0.0f : level);
2663         int err = native_setAuxEffectSendLevel(level);
2664         return err == 0 ? SUCCESS : ERROR;
2665     }
2666 
2667     //--------------------------------------------------------------------------
2668     // Explicit Routing
2669     //--------------------
2670     private AudioDeviceInfo mPreferredDevice = null;
2671 
2672     /**
2673      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
2674      * the output from this AudioTrack.
2675      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
2676      *  If deviceInfo is null, default routing is restored.
2677      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
2678      * does not correspond to a valid audio output device.
2679      */
2680     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)2681     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
2682         // Do some validation....
2683         if (deviceInfo != null && !deviceInfo.isSink()) {
2684             return false;
2685         }
2686         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
2687         boolean status = native_setOutputDevice(preferredDeviceId);
2688         if (status == true) {
2689             synchronized (this) {
2690                 mPreferredDevice = deviceInfo;
2691             }
2692         }
2693         return status;
2694     }
2695 
2696     /**
2697      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
2698      * is not guaranteed to correspond to the actual device being used for playback.
2699      */
2700     @Override
getPreferredDevice()2701     public AudioDeviceInfo getPreferredDevice() {
2702         synchronized (this) {
2703             return mPreferredDevice;
2704         }
2705     }
2706 
2707     /**
2708      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
2709      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
2710      * <code>getRoutedDevice()</code> will return null.
2711      */
2712     @Override
getRoutedDevice()2713     public AudioDeviceInfo getRoutedDevice() {
2714         int deviceId = native_getRoutedDeviceId();
2715         if (deviceId == 0) {
2716             return null;
2717         }
2718         AudioDeviceInfo[] devices =
2719                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
2720         for (int i = 0; i < devices.length; i++) {
2721             if (devices[i].getId() == deviceId) {
2722                 return devices[i];
2723             }
2724         }
2725         return null;
2726     }
2727 
2728     /*
2729      * Call BEFORE adding a routing callback handler.
2730      */
testEnableNativeRoutingCallbacksLocked()2731     private void testEnableNativeRoutingCallbacksLocked() {
2732         if (mRoutingChangeListeners.size() == 0) {
2733             native_enableDeviceCallback();
2734         }
2735     }
2736 
2737     /*
2738      * Call AFTER removing a routing callback handler.
2739      */
testDisableNativeRoutingCallbacksLocked()2740     private void testDisableNativeRoutingCallbacksLocked() {
2741         if (mRoutingChangeListeners.size() == 0) {
2742             native_disableDeviceCallback();
2743         }
2744     }
2745 
2746     //--------------------------------------------------------------------------
2747     // (Re)Routing Info
2748     //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     * Maps each registered listener to the delegate that dispatches events to it.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();
2757 
2758    /**
2759     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
2760     * changes on this AudioTrack.
2761     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
2762     * notifications of rerouting events.
2763     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
2764     * the callback. If <code>null</code>, the {@link Handler} associated with the main
2765     * {@link Looper} will be used.
2766     */
2767     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)2768     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
2769             Handler handler) {
2770         synchronized (mRoutingChangeListeners) {
2771             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
2772                 testEnableNativeRoutingCallbacksLocked();
2773                 mRoutingChangeListeners.put(
2774                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
2775                                 handler != null ? handler : new Handler(mInitializationLooper)));
2776             }
2777         }
2778     }
2779 
2780     /**
2781      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
2782      * to receive rerouting notifications.
2783      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
2784      * to remove.
2785      */
2786     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)2787     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
2788         synchronized (mRoutingChangeListeners) {
2789             if (mRoutingChangeListeners.containsKey(listener)) {
2790                 mRoutingChangeListeners.remove(listener);
2791             }
2792             testDisableNativeRoutingCallbacksLocked();
2793         }
2794     }
2795 
2796     //--------------------------------------------------------------------------
2797     // (Re)Routing Info
2798     //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        /**
         * Bridges the generic {@link AudioRouting} callback to the
         * AudioTrack-specific one; non-AudioTrack routers are ignored.
         */
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
2822 
    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Thin adapter: the deprecated listener type extends the AudioRouting one.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
2839 
    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Thin adapter: the deprecated listener type extends the AudioRouting one.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
2851 
2852     /**
2853      * Sends device list change notification to all listeners.
2854      */
broadcastRoutingChange()2855     private void broadcastRoutingChange() {
2856         AudioManager.resetAudioPortGeneration();
2857         synchronized (mRoutingChangeListeners) {
2858             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
2859                 Handler handler = delegate.getHandler();
2860                 if (handler != null) {
2861                     handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE);
2862                 }
2863             }
2864         }
2865     }
2866 
2867     //---------------------------------------------------------
2868     // Interface definitions
2869     //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }
2887 
2888     //---------------------------------------------------------
2889     // Inner classes
2890     //--------------------
2891     /**
2892      * Helper class to handle the forwarding of native events to the appropriate listener
2893      * (potentially) handled in a different thread
2894      */
2895     private class NativePositionEventHandlerDelegate {
2896         private final Handler mHandler;
2897 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)2898         NativePositionEventHandlerDelegate(final AudioTrack track,
2899                                    final OnPlaybackPositionUpdateListener listener,
2900                                    Handler handler) {
2901             // find the looper for our new event handler
2902             Looper looper;
2903             if (handler != null) {
2904                 looper = handler.getLooper();
2905             } else {
2906                 // no given handler, use the looper the AudioTrack was created in
2907                 looper = mInitializationLooper;
2908             }
2909 
2910             // construct the event handler with this looper
2911             if (looper != null) {
2912                 // implement the event handler delegate
2913                 mHandler = new Handler(looper) {
2914                     @Override
2915                     public void handleMessage(Message msg) {
2916                         if (track == null) {
2917                             return;
2918                         }
2919                         switch(msg.what) {
2920                         case NATIVE_EVENT_MARKER:
2921                             if (listener != null) {
2922                                 listener.onMarkerReached(track);
2923                             }
2924                             break;
2925                         case NATIVE_EVENT_NEW_POS:
2926                             if (listener != null) {
2927                                 listener.onPeriodicNotification(track);
2928                             }
2929                             break;
2930                         default:
2931                             loge("Unknown native event type: " + msg.what);
2932                             break;
2933                         }
2934                     }
2935                 };
2936             } else {
2937                 mHandler = null;
2938             }
2939         }
2940 
getHandler()2941         Handler getHandler() {
2942             return mHandler;
2943         }
2944     }
2945 
2946     /**
2947      * Helper class to handle the forwarding of native events to the appropriate listener
2948      * (potentially) handled in a different thread
2949      */
2950     private class NativeRoutingEventHandlerDelegate {
2951         private final Handler mHandler;
2952 
NativeRoutingEventHandlerDelegate(final AudioTrack track, final AudioRouting.OnRoutingChangedListener listener, Handler handler)2953         NativeRoutingEventHandlerDelegate(final AudioTrack track,
2954                                    final AudioRouting.OnRoutingChangedListener listener,
2955                                    Handler handler) {
2956             // find the looper for our new event handler
2957             Looper looper;
2958             if (handler != null) {
2959                 looper = handler.getLooper();
2960             } else {
2961                 // no given handler, use the looper the AudioTrack was created in
2962                 looper = mInitializationLooper;
2963             }
2964 
2965             // construct the event handler with this looper
2966             if (looper != null) {
2967                 // implement the event handler delegate
2968                 mHandler = new Handler(looper) {
2969                     @Override
2970                     public void handleMessage(Message msg) {
2971                         if (track == null) {
2972                             return;
2973                         }
2974                         switch(msg.what) {
2975                         case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
2976                             if (listener != null) {
2977                                 listener.onRoutingChanged(track);
2978                             }
2979                             break;
2980                         default:
2981                             loge("Unknown native event type: " + msg.what);
2982                             break;
2983                         }
2984                     }
2985                 };
2986             } else {
2987                 mHandler = null;
2988             }
2989         }
2990 
getHandler()2991         Handler getHandler() {
2992             return mHandler;
2993         }
2994     }
2995 
    //---------------------------------------------------------
    // Methods for IPlayer interface
    //--------------------

    /** IPlayer bridge: delegates to {@link #play()}. */
    @Override
    void playerStart() {
        play();
    }

    /** IPlayer bridge: delegates to {@link #pause()}. */
    @Override
    void playerPause() {
        pause();
    }

    /** IPlayer bridge: delegates to {@link #stop()}. */
    @Override
    void playerStop() {
        stop();
    }
3013 
3014     //---------------------------------------------------------
3015     // Java methods called from the native side
3016     //--------------------
3017     @SuppressWarnings("unused")
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)3018     private static void postEventFromNative(Object audiotrack_ref,
3019             int what, int arg1, int arg2, Object obj) {
3020         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
3021         AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
3022         if (track == null) {
3023             return;
3024         }
3025 
3026         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
3027             track.broadcastRoutingChange();
3028             return;
3029         }
3030         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
3031         if (delegate != null) {
3032             Handler handler = delegate.getHandler();
3033             if (handler != null) {
3034                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
3035                 handler.sendMessage(m);
3036             }
3037         }
3038     }
3039 
3040 
3041     //---------------------------------------------------------
3042     // Native methods called from the Java side
3043     //--------------------
3044 
3045     // post-condition: mStreamType is overwritten with a value
3046     //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
3047     //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
native_setup(Object audiotrack_this, Object attributes, int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat, int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack)3048     private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
3049             Object /*AudioAttributes*/ attributes,
3050             int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
3051             int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);
3052 
native_finalize()3053     private native final void native_finalize();
3054 
3055     /**
3056      * @hide
3057      */
native_release()3058     public native final void native_release();
3059 
native_start()3060     private native final void native_start();
3061 
native_stop()3062     private native final void native_stop();
3063 
native_pause()3064     private native final void native_pause();
3065 
native_flush()3066     private native final void native_flush();
3067 
native_write_byte(byte[] audioData, int offsetInBytes, int sizeInBytes, int format, boolean isBlocking)3068     private native final int native_write_byte(byte[] audioData,
3069                                                int offsetInBytes, int sizeInBytes, int format,
3070                                                boolean isBlocking);
3071 
native_write_short(short[] audioData, int offsetInShorts, int sizeInShorts, int format, boolean isBlocking)3072     private native final int native_write_short(short[] audioData,
3073                                                 int offsetInShorts, int sizeInShorts, int format,
3074                                                 boolean isBlocking);
3075 
native_write_float(float[] audioData, int offsetInFloats, int sizeInFloats, int format, boolean isBlocking)3076     private native final int native_write_float(float[] audioData,
3077                                                 int offsetInFloats, int sizeInFloats, int format,
3078                                                 boolean isBlocking);
3079 
native_write_native_bytes(Object audioData, int positionInBytes, int sizeInBytes, int format, boolean blocking)3080     private native final int native_write_native_bytes(Object audioData,
3081             int positionInBytes, int sizeInBytes, int format, boolean blocking);
3082 
    // Rewinds a static-mode track so its buffer can be replayed.
    // NOTE(review): exact semantics live in the native layer — confirm.
    private native final int native_reload_static();

    // Effective buffer size in frames (get/set) and the total allocated
    // buffer capacity in frames (read-only).
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();
3088 
    // Sets the per-channel playback gain on the native track.
    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Legacy playback-rate control expressed as a sample rate in Hz.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // Structured playback-parameter control (PlaybackParams object).
    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();
3096 
    // Marker notification position (presumably in frames — confirm against
    // the public setNotificationMarkerPosition() caller).
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    // Period of the periodic position-update notification.
    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position.
    private native final int native_set_position(int position);
    private native final int native_get_position();
3105 
    // Read-only queries of native track state: output latency, the number of
    // buffer underruns observed so far, and the track's flags.
    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();
3111 
    // Fills a caller-supplied array with a paired playback timestamp.
    // longArray must be a non-null array of length >= 2:
    //   [0] is assigned the frame position
    //   [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    // NOTE(review): the int return is presumably a status code indicating
    // whether a timestamp was available — confirm against the JNI side.
    private native final int native_get_timestamp(long[] longArray);
3116 
    // Configures looping for a static-mode track between the start and end
    // positions, repeated loopCount times. NOTE(review): boundary
    // inclusivity and units are defined by the native layer — confirm.
    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that need no track instance: the output sample rate for
    // a given stream type, and the minimum buffer size for a sample-rate /
    // channel-config / format combination.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);
3122 
    // Auxiliary audio effect: attach by effect id and set the send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit output-device routing and enable/disable of the native
    // routing-change callback used by the device callback machinery.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    // NOTE(review): presumably returns the native FCC_8 (fixed channel
    // count, 8) constant so Java and native agree — confirm.
    static private native int native_get_FCC_8();
3131 
    // VolumeShaper plumbing: applies a configuration/operation pair to the
    // native track, and reads back the state of a shaper by id (@Nullable —
    // may return null when no state is available).
    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
3137 
    //---------------------------------------------------------
    // Utility methods
    //------------------
3141 
logd(String msg)3142     private static void logd(String msg) {
3143         Log.d(TAG, msg);
3144     }
3145 
loge(String msg)3146     private static void loge(String msg) {
3147         Log.e(TAG, msg);
3148     }
3149 }
3150