• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import android.annotation.CallbackExecutor;
20 import android.annotation.FloatRange;
21 import android.annotation.IntDef;
22 import android.annotation.IntRange;
23 import android.annotation.NonNull;
24 import android.annotation.Nullable;
25 import android.annotation.RequiresPermission;
26 import android.annotation.SystemApi;
27 import android.annotation.TestApi;
28 import android.compat.annotation.UnsupportedAppUsage;
29 import android.media.metrics.LogSessionId;
30 import android.os.Binder;
31 import android.os.Build;
32 import android.os.Handler;
33 import android.os.HandlerThread;
34 import android.os.Looper;
35 import android.os.Message;
36 import android.os.PersistableBundle;
37 import android.util.ArrayMap;
38 import android.util.Log;
39 
40 import com.android.internal.annotations.GuardedBy;
41 
42 import java.lang.annotation.Retention;
43 import java.lang.annotation.RetentionPolicy;
44 import java.lang.ref.WeakReference;
45 import java.nio.ByteBuffer;
46 import java.nio.ByteOrder;
47 import java.nio.NioUtils;
48 import java.util.HashMap;
49 import java.util.LinkedList;
50 import java.util.Objects;
51 import java.util.concurrent.Executor;
52 
53 /**
54  * The AudioTrack class manages and plays a single audio resource for Java applications.
55  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
56  * achieved by "pushing" the data to the AudioTrack object using one of the
57  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
58  *  and {@link #write(float[], int, int, int)} methods.
59  *
60  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
61  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
62  * one of the {@code write()} methods. These are blocking and return when the data has been
63  * transferred from the Java layer to the native layer and queued for playback. The streaming
64  * mode is most useful when playing blocks of audio data that for instance are:
65  *
66  * <ul>
67  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
68  *   <li>too big to fit in memory because of the characteristics of the audio data
69  *         (high sampling rate, bits per sample ...)</li>
70  *   <li>received or generated while previously queued audio is playing.</li>
71  * </ul>
72  *
73  * The static mode should be chosen when dealing with short sounds that fit in memory and
74  * that need to be played with the smallest latency possible. The static mode will
75  * therefore be preferred for UI and game sounds that are played often, and with the
76  * smallest overhead possible.
77  *
78  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
79  * The size of this buffer, specified during the construction, determines how long an AudioTrack
80  * can play before running out of data.<br>
81  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
82  * be played from it.<br>
83  * For the streaming mode, data will be written to the audio sink in chunks of
84  * sizes less than or equal to the total buffer size.
85  *
86  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
87  */
88 public class AudioTrack extends PlayerBase
89                         implements AudioRouting
90                                  , VolumeAutomation
91 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    // Play-state values are public API; the numeric values intentionally
    // match the OpenSL ES SL_PLAYSTATE_* constants.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;
124 
    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;
158 
    // Public result codes mirror AudioSystem's so values can be passed through
    // from the native layer unchanged.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
194 
195     // Events:
196     // to keep in sync with frameworks/av/include/media/AudioTrack.h
197     // Note: To avoid collisions with other event constants,
198     // do not define an event here that is the same value as
199     // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.
200 
201     /**
202      * Event id denotes when playback head has reached a previously set marker.
203      */
204     private static final int NATIVE_EVENT_MARKER  = 3;
205     /**
206      * Event id denotes when previously set update period has elapsed during playback.
207      */
208     private static final int NATIVE_EVENT_NEW_POS = 4;
209     /**
210      * Callback for more data
211      */
212     private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
213     /**
214      * IAudioTrack tear down for offloaded tracks
215      * TODO: when received, java AudioTrack must be released
216      */
217     private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
218     /**
219      * Event id denotes when all the buffers queued in AF and HW are played
220      * back (after stop is called) for an offloaded track.
221      */
222     private static final int NATIVE_EVENT_STREAM_END = 7;
223     /**
224      * Event id denotes when the codec format changes.
225      *
226      * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
227      * this event comes from the AudioFlinger Thread / Output Stream management
228      * (not from buffer indications as above).
229      */
230     private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;
231 
232     private final static String TAG = "android.media.AudioTrack";
233 
    /** @hide */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM formats are allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /* Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     */
    /** @hide */
    @IntDef(prefix = { "ENCAPSULATION_METADATA_TYPE_" }, value = {
        ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
        ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
        ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved; do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;
297 
    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     *
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;
362 
363     /** @hide */
364     @IntDef({
365         WRITE_BLOCKING,
366         WRITE_NON_BLOCKING
367     })
368     @Retention(RetentionPolicy.SOURCE)
369     public @interface WriteMode {}
370 
371     /**
372      * The write mode indicating the write operation will block until all data has been written,
373      * to be used as the actual value of the writeMode parameter in
374      * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
375      * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
376      * {@link #write(ByteBuffer, int, int, long)}.
377      */
378     public final static int WRITE_BLOCKING = 0;
379 
380     /**
381      * The write mode indicating the write operation will return immediately after
382      * queuing as much audio data for playback as possible without blocking,
383      * to be used as the actual value of the writeMode parameter in
384      * {@link #write(ByteBuffer, int, int)}, {@link #write(short[], int, int, int)},
385      * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
386      * {@link #write(ByteBuffer, int, int, long)}.
387      */
388     public final static int WRITE_NON_BLOCKING = 1;
389 
    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // Native output flags corresponding to the performance modes above.
    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
452 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING
     * (offload mode additionally uses the hidden PLAYSTATE_STOPPING and
     * PLAYSTATE_PAUSED_STOPPING values).
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    /**
     * The log session id used for metrics.
     * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set.
     */
    @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private long mJniData;
594 
595 
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    // NOTE(review): javadoc carries @deprecated but no @Deprecated annotation is present;
    // confirm whether this is intentional (e.g. handled by API metadata tooling).
    // Delegates to the session-id overload with a generated session.
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }
645 
646     /**
647      * Class constructor with audio session. Use this constructor when the AudioTrack must be
648      * attached to a particular audio session. The primary use of the audio session ID is to
649      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
650      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
651      * and media players in the same session and not to the output mix.
652      * When an AudioTrack is created without specifying a session, it will create its own session
653      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
654      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
655      * session
656      * with all other media players or audio tracks in the same session, otherwise a new session
657      * will be created for this track if none is supplied.
658      * @param streamType the type of the audio stream. See
659      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
660      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
661      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
662      * @param sampleRateInHz the initial source sample rate expressed in Hz.
663      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
664      *   which is usually the sample rate of the sink.
665      * @param channelConfig describes the configuration of the audio channels.
666      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
667      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
668      * @param audioFormat the format in which the audio data is represented.
669      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
670      *   {@link AudioFormat#ENCODING_PCM_8BIT},
671      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
672      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
673      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
674      *   <p> If the track's creation mode is {@link #MODE_STATIC},
675      *   this is the maximum length sample, or audio clip, that can be played by this instance.
676      *   <p> If the track's creation mode is {@link #MODE_STREAM},
677      *   this should be the desired buffer size
678      *   for the <code>AudioTrack</code> to satisfy the application's
679      *   latency requirements.
680      *   If <code>bufferSizeInBytes</code> is less than the
681      *   minimum buffer size for the output sink, it is increased to the minimum
682      *   buffer size.
683      *   The method {@link #getBufferSizeInFrames()} returns the
684      *   actual size in frames of the buffer created, which
685      *   determines the minimum frequency to write
686      *   to the streaming <code>AudioTrack</code> to avoid underrun.
687      *   You can write data into this buffer in smaller chunks than this size.
688      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
689      *   for an AudioTrack instance in streaming mode.
690      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
691      * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if the parameter combination
     *   (sample rate, channel configuration, encoding, buffer size, mode or session ID) is invalid.
693      * @deprecated use {@link Builder} or
694      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
695      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
696      */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Translate the legacy stream type into AudioAttributes and wrap the raw PCM
        // parameters in an AudioFormat, then delegate to the attributes-based constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
        // Record/log the use of the deprecated stream-type entry point.
        deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
    }
713 
714     /**
715      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
716      * @param attributes a non-null {@link AudioAttributes} instance.
717      * @param format a non-null {@link AudioFormat} instance describing the format of the data
718      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
719      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
720      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
721      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
722      *   <p> If the track's creation mode is {@link #MODE_STATIC},
723      *   this is the maximum length sample, or audio clip, that can be played by this instance.
724      *   <p> If the track's creation mode is {@link #MODE_STREAM},
725      *   this should be the desired buffer size
726      *   for the <code>AudioTrack</code> to satisfy the application's
727      *   latency requirements.
728      *   If <code>bufferSizeInBytes</code> is less than the
729      *   minimum buffer size for the output sink, it is increased to the minimum
730      *   buffer size.
731      *   The method {@link #getBufferSizeInFrames()} returns the
732      *   actual size in frames of the buffer created, which
733      *   determines the minimum frequency to write
734      *   to the streaming <code>AudioTrack</code> to avoid underrun.
735      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
736      *   for an AudioTrack instance in streaming mode.
737      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
738      * @param sessionId ID of audio session the AudioTrack must be attached to, or
739      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
740      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
741      *   construction.
742      * @throws IllegalArgumentException
743      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)744     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
745             int mode, int sessionId)
746                     throws IllegalArgumentException {
747         this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
748                 ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
749     }
750 
    /**
     * Fully-parameterized constructor backing all public constructors and {@link Builder#build()}.
     * Validates the Java-side parameters, optionally rewrites the attribute flags for
     * deep-buffer (power-saving) playback, then performs the native track setup.
     * On native failure the instance is left with {@code mState == STATE_UNINITIALIZED}
     * rather than throwing.
     *
     * @param attributes non-null attributes for the new track (copied by reference; immutable).
     * @param format non-null format; unset properties fall back to defaults chosen below.
     * @param bufferSizeInBytes requested internal buffer size, validated by audioBuffSizeCheck().
     * @param mode {@link #MODE_STATIC} or {@link #MODE_STREAM}.
     * @param sessionId non-negative audio session ID (0 requests allocation by the system).
     * @param offload true to use the offloaded playback path.
     * @param encapsulationMode one of the ENCAPSULATION_MODE_* constants.
     * @param tunerConfiguration optional TV-tuner stream/sync configuration, may be null.
     * @throws IllegalArgumentException if format is null, a parameter check fails,
     *   or sessionId is negative.
     */
    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload, int encapsulationMode,
            @Nullable TunerConfiguration tunerConfiguration)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode: set FLAG_DEEP_BUFFER and clear the
        // conflicting FLAG_LOW_LATENCY on a rebuilt copy of the attributes.
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        // (falls back to the main looper when constructed on a thread without one)
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is passed to the native layer as 0 ("pick for me").
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the format properties the caller actually set; unset masks default
        // to 0, and when neither mask is set, stereo positional output is assumed.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate/mChannelMask/mChannelIndexMask/mAudioFormat/
        // mDataLoadMode (throws IllegalArgumentException on invalid combinations).
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Single-element arrays act as in/out parameters for the native call: the native
        // layer may adjust the sample rate and allocate/confirm the session ID.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload, encapsulationMode, tunerConfiguration,
                getCurrentOpPackageName());
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        // Adopt the values possibly rewritten by the native layer.
        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // Round the AV-sync header size up to a whole number of frames.
            // NOTE(review): if HEADER_V2_SIZE_BYTES is an integral type, the division below
            // truncates before Math.ceil runs, making the ceil a no-op — confirm the constant
            // is declared as a floating-point value.
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        // Static tracks still need their data written before they are fully usable.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer(mSessionId);
        native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
    }
851 
852     /**
853      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
854      * the AudioTrackRoutingProxy subclass.
855      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
856      * (associated with an OpenSL ES player).
857      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
858      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
859      * it means that the OpenSL player interface hasn't been realized, so there is no native
860      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
861      * OpenSLES interface is realized.
862      */
AudioTrack(long nativeTrackInJavaObj)863     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
864         super(new AudioAttributes.Builder().build(),
865                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
866         // "final"s
867         mNativeTrackInJavaObj = 0;
868         mJniData = 0;
869 
870         // remember which looper is associated with the AudioTrack instantiation
871         Looper looper;
872         if ((looper = Looper.myLooper()) == null) {
873             looper = Looper.getMainLooper();
874         }
875         mInitializationLooper = looper;
876 
877         // other initialization...
878         if (nativeTrackInJavaObj != 0) {
879             baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE);
880             deferred_connect(nativeTrackInJavaObj);
881         } else {
882             mState = STATE_UNINITIALIZED;
883         }
884     }
885 
886     /**
887      * @hide
888      */
889     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
deferred_connect(long nativeTrackInJavaObj)890     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
891         if (mState != STATE_INITIALIZED) {
892             // Note that for this native_setup, we are providing an already created/initialized
893             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
894             int[] session = { 0 };
895             int[] rates = { 0 };
896             int initResult = native_setup(new WeakReference<AudioTrack>(this),
897                     null /*mAttributes - NA*/,
898                     rates /*sampleRate - NA*/,
899                     0 /*mChannelMask - NA*/,
900                     0 /*mChannelIndexMask - NA*/,
901                     0 /*mAudioFormat - NA*/,
902                     0 /*mNativeBufferSizeInBytes - NA*/,
903                     0 /*mDataLoadMode - NA*/,
904                     session,
905                     nativeTrackInJavaObj,
906                     false /*offload*/,
907                     ENCAPSULATION_MODE_NONE,
908                     null /* tunerConfiguration */,
909                     "" /* opPackagename */);
910             if (initResult != SUCCESS) {
911                 loge("Error code "+initResult+" when initializing AudioTrack.");
912                 return; // with mState == STATE_UNINITIALIZED
913             }
914 
915             mSessionId = session[0];
916 
917             mState = STATE_INITIALIZED;
918         }
919     }
920 
921     /**
922      * TunerConfiguration is used to convey tuner information
923      * from the android.media.tv.Tuner API to AudioTrack construction.
924      *
925      * Use the Builder to construct the TunerConfiguration object,
926      * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack.
927      * @hide
928      */
929     @SystemApi
930     public static class TunerConfiguration {
931         private final int mContentId;
932         private final int mSyncId;
933 
934         /**
935          * A special content id for {@link #TunerConfiguration(int, int)}
936          * indicating audio is delivered
937          * from an {@code AudioTrack} write, not tunneled from the tuner stack.
938          */
939         public static final int CONTENT_ID_NONE = 0;
940 
941         /**
942          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
943          *
944          * @param contentId selects the audio stream to use.
945          *     The contentId may be obtained from
946          *     {@link android.media.tv.tuner.filter.Filter#getId()},
947          *     such obtained id is always a positive number.
948          *     If audio is to be delivered through an {@code AudioTrack} write
949          *     then {@code CONTENT_ID_NONE} may be used.
950          * @param syncId selects the clock to use for synchronization
951          *     of audio with other streams such as video.
952          *     The syncId may be obtained from
953          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
954          *     This is always a positive number.
955          */
956         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)957         public TunerConfiguration(
958                 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) {
959             if (contentId < 0) {
960                 throw new IllegalArgumentException(
961                         "contentId " + contentId + " must be positive or CONTENT_ID_NONE");
962             }
963             if (syncId < 1) {
964                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
965             }
966             mContentId = contentId;
967             mSyncId = syncId;
968         }
969 
970         /**
971          * Returns the contentId.
972          */
973         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()974         public @IntRange(from = 1) int getContentId() {
975             return mContentId; // The Builder ensures this is > 0.
976         }
977 
978         /**
979          * Returns the syncId.
980          */
981         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()982         public @IntRange(from = 1) int getSyncId() {
983             return mSyncId;  // The Builder ensures this is > 0.
984         }
985     }
986 
987     /**
988      * Builder class for {@link AudioTrack} objects.
989      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
990      * attributes and audio format parameters, you indicate which of those vary from the default
991      * behavior on the device.
992      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
993      * parameters, to be used by a new <code>AudioTrack</code> instance:
994      *
995      * <pre class="prettyprint">
996      * AudioTrack player = new AudioTrack.Builder()
997      *         .setAudioAttributes(new AudioAttributes.Builder()
998      *                  .setUsage(AudioAttributes.USAGE_ALARM)
999      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
1000      *                  .build())
1001      *         .setAudioFormat(new AudioFormat.Builder()
1002      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
1003      *                 .setSampleRate(44100)
1004      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1005      *                 .build())
1006      *         .setBufferSizeInBytes(minBuffSize)
1007      *         .build();
1008      * </pre>
1009      * <p>
1010      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
1011      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
1012      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
1013      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
1014      * {@link AudioFormat#ENCODING_PCM_16BIT}.
1015      * The sample rate will depend on the device actually selected for playback and can be queried
1016      * with {@link #getSampleRate()} method.
1017      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
1018      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
1019      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1020      * <code>MODE_STREAM</code> will be used.
1021      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1022      * be generated.
1023      * <br>Offload is false by default.
1024      */
1025     public static class Builder {
1026         private AudioAttributes mAttributes;
1027         private AudioFormat mFormat;
1028         private int mBufferSizeInBytes;
1029         private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
1030         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
1031         private int mMode = MODE_STREAM;
1032         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
1033         private boolean mOffload = false;
1034         private TunerConfiguration mTunerConfiguration;
1035 
1036         /**
1037          * Constructs a new Builder with the default values as described above.
1038          */
Builder()1039         public Builder() {
1040         }
1041 
1042         /**
1043          * Sets the {@link AudioAttributes}.
1044          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1045          *     data to be played.
1046          * @return the same Builder instance.
1047          * @throws IllegalArgumentException
1048          */
setAudioAttributes(@onNull AudioAttributes attributes)1049         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1050                 throws IllegalArgumentException {
1051             if (attributes == null) {
1052                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1053             }
1054             // keep reference, we only copy the data when building
1055             mAttributes = attributes;
1056             return this;
1057         }
1058 
1059         /**
1060          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1061          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1062          * as encoding, channel mask and sample rate.
1063          * @param format a non-null {@link AudioFormat} instance.
1064          * @return the same Builder instance.
1065          * @throws IllegalArgumentException
1066          */
setAudioFormat(@onNull AudioFormat format)1067         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1068                 throws IllegalArgumentException {
1069             if (format == null) {
1070                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1071             }
1072             // keep reference, we only copy the data when building
1073             mFormat = format;
1074             return this;
1075         }
1076 
1077         /**
1078          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1079          * If using the {@link AudioTrack} in streaming mode
1080          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1081          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1082          * the estimated minimum buffer size for the creation of an AudioTrack instance
1083          * in streaming mode.
1084          * <br>If using the <code>AudioTrack</code> in static mode (see
1085          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1086          * played by this instance.
1087          * @param bufferSizeInBytes
1088          * @return the same Builder instance.
1089          * @throws IllegalArgumentException
1090          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1091         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1092                 throws IllegalArgumentException {
1093             if (bufferSizeInBytes <= 0) {
1094                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1095             }
1096             mBufferSizeInBytes = bufferSizeInBytes;
1097             return this;
1098         }
1099 
1100         /**
1101          * Sets the encapsulation mode.
1102          *
1103          * Encapsulation mode allows metadata to be sent together with
1104          * the audio data payload in a {@code ByteBuffer}.
1105          * This requires a compatible hardware audio codec.
1106          *
1107          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1108          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1109          * @return the same Builder instance.
1110          */
1111         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1112         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1113         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1114             switch (encapsulationMode) {
1115                 case ENCAPSULATION_MODE_NONE:
1116                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1117                 case ENCAPSULATION_MODE_HANDLE:
1118                     mEncapsulationMode = encapsulationMode;
1119                     break;
1120                 default:
1121                     throw new IllegalArgumentException(
1122                             "Invalid encapsulation mode " + encapsulationMode);
1123             }
1124             return this;
1125         }
1126 
1127         /**
1128          * Sets the mode under which buffers of audio data are transferred from the
1129          * {@link AudioTrack} to the framework.
1130          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1131          * @return the same Builder instance.
1132          * @throws IllegalArgumentException
1133          */
setTransferMode(@ransferMode int mode)1134         public @NonNull Builder setTransferMode(@TransferMode int mode)
1135                 throws IllegalArgumentException {
1136             switch(mode) {
1137                 case MODE_STREAM:
1138                 case MODE_STATIC:
1139                     mMode = mode;
1140                     break;
1141                 default:
1142                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1143             }
1144             return this;
1145         }
1146 
1147         /**
1148          * Sets the session ID the {@link AudioTrack} will be attached to.
1149          * @param sessionId a strictly positive ID number retrieved from another
1150          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1151          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1152          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1153          * @return the same Builder instance.
1154          * @throws IllegalArgumentException
1155          */
setSessionId(@ntRangefrom = 1) int sessionId)1156         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1157                 throws IllegalArgumentException {
1158             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1159                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1160             }
1161             mSessionId = sessionId;
1162             return this;
1163         }
1164 
1165         /**
1166          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1167          * may not be supported by the particular device, and the framework is free
1168          * to ignore such request if it is incompatible with other requests or hardware.
1169          *
1170          * @param performanceMode one of
1171          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1172          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1173          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1174          * @return the same Builder instance.
1175          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1176          */
setPerformanceMode(@erformanceMode int performanceMode)1177         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1178             switch (performanceMode) {
1179                 case PERFORMANCE_MODE_NONE:
1180                 case PERFORMANCE_MODE_LOW_LATENCY:
1181                 case PERFORMANCE_MODE_POWER_SAVING:
1182                     mPerformanceMode = performanceMode;
1183                     break;
1184                 default:
1185                     throw new IllegalArgumentException(
1186                             "Invalid performance mode " + performanceMode);
1187             }
1188             return this;
1189         }
1190 
1191         /**
1192          * Sets whether this track will play through the offloaded audio path.
1193          * When set to true, at build time, the audio format will be checked against
1194          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
1195          * to verify the audio format used by this track is supported on the device's offload
1196          * path (if any).
1197          * <br>Offload is only supported for media audio streams, and therefore requires that
1198          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
1199          * @param offload true to require the offload path for playback.
1200          * @return the same Builder instance.
1201          */
setOffloadedPlayback(boolean offload)1202         public @NonNull Builder setOffloadedPlayback(boolean offload) {
1203             mOffload = offload;
1204             return this;
1205         }
1206 
1207         /**
1208          * Sets the tuner configuration for the {@code AudioTrack}.
1209          *
1210          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1211          * the Android TV tuner API which indicate the audio content stream id and the
1212          * synchronization id for the {@code AudioTrack}.
1213          *
1214          * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
1215          * @return the same Builder instance.
1216          * @hide
1217          */
1218         @SystemApi
1219         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1220         public @NonNull Builder setTunerConfiguration(
1221                 @NonNull TunerConfiguration tunerConfiguration) {
1222             if (tunerConfiguration == null) {
1223                 throw new IllegalArgumentException("tunerConfiguration is null");
1224             }
1225             mTunerConfiguration = tunerConfiguration;
1226             return this;
1227         }
1228 
1229         /**
1230          * Builds an {@link AudioTrack} instance initialized with all the parameters set
1231          * on this <code>Builder</code>.
1232          * @return a new successfully initialized {@link AudioTrack} instance.
1233          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
1234          *     were incompatible, or if they are not supported by the device,
1235          *     or if the device was not available.
1236          */
build()1237         public @NonNull AudioTrack build() throws UnsupportedOperationException {
1238             if (mAttributes == null) {
1239                 mAttributes = new AudioAttributes.Builder()
1240                         .setUsage(AudioAttributes.USAGE_MEDIA)
1241                         .build();
1242             }
1243             switch (mPerformanceMode) {
1244             case PERFORMANCE_MODE_LOW_LATENCY:
1245                 mAttributes = new AudioAttributes.Builder(mAttributes)
1246                     .replaceFlags((mAttributes.getAllFlags()
1247                             | AudioAttributes.FLAG_LOW_LATENCY)
1248                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
1249                     .build();
1250                 break;
1251             case PERFORMANCE_MODE_NONE:
1252                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
1253                     break; // do not enable deep buffer mode.
1254                 }
1255                 // permitted to fall through to enable deep buffer
1256             case PERFORMANCE_MODE_POWER_SAVING:
1257                 mAttributes = new AudioAttributes.Builder(mAttributes)
1258                 .replaceFlags((mAttributes.getAllFlags()
1259                         | AudioAttributes.FLAG_DEEP_BUFFER)
1260                         & ~AudioAttributes.FLAG_LOW_LATENCY)
1261                 .build();
1262                 break;
1263             }
1264 
1265             if (mFormat == null) {
1266                 mFormat = new AudioFormat.Builder()
1267                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1268                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
1269                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
1270                         .build();
1271             }
1272 
1273             if (mOffload) {
1274                 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
1275                     throw new UnsupportedOperationException(
1276                             "Offload and low latency modes are incompatible");
1277                 }
1278                 if (AudioSystem.getOffloadSupport(mFormat, mAttributes)
1279                         == AudioSystem.OFFLOAD_NOT_SUPPORTED) {
1280                     throw new UnsupportedOperationException(
1281                             "Cannot create AudioTrack, offload format / attributes not supported");
1282                 }
1283             }
1284 
1285             // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?
1286 
1287             // If the buffer size is not specified in streaming mode,
1288             // use a single frame for the buffer size and let the
1289             // native code figure out the minimum buffer size.
1290             if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
1291                 int bytesPerSample = 1;
1292                 if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) {
1293                     try {
1294                         bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding());
1295                     } catch (IllegalArgumentException e) {
1296                         // do nothing
1297                     }
1298                 }
1299                 mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample;
1300             }
1301 
1302             try {
1303                 final AudioTrack track = new AudioTrack(
1304                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload,
1305                         mEncapsulationMode, mTunerConfiguration);
1306                 if (track.getState() == STATE_UNINITIALIZED) {
1307                     // release is not necessary
1308                     throw new UnsupportedOperationException("Cannot create AudioTrack");
1309                 }
1310                 return track;
1311             } catch (IllegalArgumentException e) {
1312                 throw new UnsupportedOperationException(e.getMessage());
1313             }
1314         }
1315     }
1316 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Runtime validation: @IntRange is advisory only and not enforced at runtime.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values so getOffloadDelay()/getOffloadPadding() can report them later.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1347 
1348     /**
1349      * Return the decoder delay of an offloaded track, expressed in frames, previously set with
1350      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1351      * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
1352      * This value can only be queried on a track successfully initialized with
1353      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1354      * @return decoder delay expressed in frames.
1355      */
getOffloadDelay()1356     public @IntRange(from = 0) int getOffloadDelay() {
1357         if (!mOffloaded) {
1358             throw new IllegalStateException("Illegal query of delay on non-offloaded track");
1359         }
1360         if (mState == STATE_UNINITIALIZED) {
1361             throw new IllegalStateException("Illegal query of delay on uninitialized track");
1362         }
1363         return mOffloadDelayFrames;
1364     }
1365 
1366     /**
1367      * Return the decoder padding of an offloaded track, expressed in frames, previously set with
1368      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1369      * <p>This padding indicates the number of frames to be ignored at the end of the stream.
1370      * This value can only be queried on a track successfully initialized with
1371      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1372      * @return decoder padding expressed in frames.
1373      */
getOffloadPadding()1374     public @IntRange(from = 0) int getOffloadPadding() {
1375         if (!mOffloaded) {
1376             throw new IllegalStateException("Illegal query of padding on non-offloaded track");
1377         }
1378         if (mState == STATE_UNINITIALIZED) {
1379             throw new IllegalStateException("Illegal query of padding on uninitialized track");
1380         }
1381         return mOffloadPaddingFrames;
1382     }
1383 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@see #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     */
    public void setOffloadEndOfStream() {
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // A registered StreamEventCallback is required so the app can observe the end of the
        // drain that this call initiates.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // Stop the native track and record the pending EOS under the play-state lock so the
        // state transition to PLAYSTATE_STOPPING is atomic with the native stop.
        // NOTE(review): mOffloadEosPending is presumably consumed when the stream-end event
        // arrives — confirm against the stream event handling code.
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1416 
    /**
     * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
     * to {@code true}.
     * @return true if the track is using offloaded playback.
     */
    public boolean isOffloadedPlayback() {
        // Immutable after construction; no state check needed.
        return mOffloaded;
    }
1425 
    /**
     * Returns whether direct playback of an audio format with the provided attributes is
     * currently supported on the system.
     * <p>Direct playback means that the audio stream is not resampled or downmixed
     * by the framework. Checking for direct support can help the app select the representation
     * of audio content that most closely matches the capabilities of the device and peripherals
     * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
     * or mixed with other streams, if needed.
     * <p>Also note that this query only provides information about the support of an audio format.
     * It does not indicate whether the resources necessary for the playback are available
     * at that instant.
     * @param format a non-null {@link AudioFormat} instance describing the format of
     *   the audio data.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @return true if the given audio format can be played directly.
     * @throws IllegalArgumentException if {@code format} or {@code attributes} is null.
     */
    public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
            @NonNull AudioAttributes attributes) {
        // Explicit null checks preserve the IllegalArgumentException contract
        // (@NonNull is not enforced at runtime).
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat argument");
        }
        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes argument");
        }
        return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
                format.getChannelMask(), format.getChannelIndexMask(),
                attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
    }
1454 
    /*
     * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 binary32 float.
     * This means fractions must be divisible by a power of 2. For example,
     * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressible by
     * a finite binary fraction.
     *
     * 48.f is the nominal max for API level {@link android.os.Build.VERSION_CODES#R}.
     * We use this to suggest a baseline range for implementation.
     *
     * The API contract specification allows increasing this value in a future
     * API release, but not decreasing this value.
     */
    private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1468 
isValidAudioDescriptionMixLevel(float level)1469     private static boolean isValidAudioDescriptionMixLevel(float level) {
1470         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1471     }
1472 
1473     /**
1474      * Sets the Audio Description mix level in dB.
1475      *
1476      * For AudioTracks incorporating a secondary Audio Description stream
1477      * (where such contents may be sent through an Encapsulation Mode
1478      * other than {@link #ENCAPSULATION_MODE_NONE}).
1479      * or internally by a HW channel),
1480      * the level of mixing of the Audio Description to the Main Audio stream
1481      * is controlled by this method.
1482      *
1483      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1484      *
1485      * @param level a floating point value between
1486      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1487      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1488      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1489      * @return true on success, false on failure.
1490      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1491     public boolean setAudioDescriptionMixLeveldB(
1492             @FloatRange(to = 48.f, toInclusive = true) float level) {
1493         if (!isValidAudioDescriptionMixLevel(level)) {
1494             throw new IllegalArgumentException("level is out of range" + level);
1495         }
1496         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1497     }
1498 
    /**
     * Returns the Audio Description mix level in dB.
     *
     * If Audio Description mixing is unavailable from the hardware device,
     * a value of {@code Float.NEGATIVE_INFINITY} is returned.
     *
     * @return the current Audio Description Mix Level in dB.
     *     A value of {@code Float.NEGATIVE_INFINITY} means
     *     that the audio description is not mixed or
     *     the hardware is not available.
     *     This should reflect the <strong>true</strong> internal device mix level;
     *     hence the application might receive any floating value
     *     except {@code Float.NaN}.
     */
    public float getAudioDescriptionMixLeveldB() {
        // Out-parameter for the native query; pre-seeded with the "not mixed" sentinel.
        float[] level = { Float.NEGATIVE_INFINITY };
        try {
            final int status = native_get_audio_description_mix_level_db(level);
            // NaN is filtered here so callers never observe it, per the contract above.
            if (status != SUCCESS || Float.isNaN(level[0])) {
                return Float.NEGATIVE_INFINITY;
            }
        } catch (Exception e) {
            // Deliberate best-effort: any native failure is reported as "not mixed"
            // rather than propagated to the caller.
            return Float.NEGATIVE_INFINITY;
        }
        return level[0];
    }
1525 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1526     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1527         switch (dualMonoMode) {
1528             case DUAL_MONO_MODE_OFF:
1529             case DUAL_MONO_MODE_LR:
1530             case DUAL_MONO_MODE_LL:
1531             case DUAL_MONO_MODE_RR:
1532                 return true;
1533             default:
1534                 return false;
1535         }
1536     }
1537 
1538     /**
1539      * Sets the Dual Mono mode presentation on the output device.
1540      *
1541      * The Dual Mono mode is generally applied to stereo audio streams
1542      * where the left and right channels come from separate sources.
1543      *
1544      * For compressed audio, where the decoding is done in hardware,
1545      * Dual Mono presentation needs to be performed
1546      * by the hardware output device
1547      * as the PCM audio is not available to the framework.
1548      *
1549      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1550      *     {@link #DUAL_MONO_MODE_LR},
1551      *     {@link #DUAL_MONO_MODE_LL},
1552      *     {@link #DUAL_MONO_MODE_RR}.
1553      *
1554      * @return true on success, false on failure if the output device
1555      *     does not support Dual Mono mode.
1556      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1557     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1558         if (!isValidDualMonoMode(dualMonoMode)) {
1559             throw new IllegalArgumentException(
1560                     "Invalid Dual Mono mode " + dualMonoMode);
1561         }
1562         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1563     }
1564 
    /**
     * Returns the Dual Mono mode presentation setting.
     *
     * If no Dual Mono presentation is available for the output device,
     * then {@link #DUAL_MONO_MODE_OFF} is returned.
     *
     * @return one of {@link #DUAL_MONO_MODE_OFF},
     *     {@link #DUAL_MONO_MODE_LR},
     *     {@link #DUAL_MONO_MODE_LL},
     *     {@link #DUAL_MONO_MODE_RR}.
     */
    public @DualMonoMode int getDualMonoMode() {
        // Out-parameter for the native query; pre-seeded with the default (OFF).
        int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
        try {
            final int status = native_get_dual_mono_mode(dualMonoMode);
            // Sanitize the native result so only documented constants escape.
            if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
                return DUAL_MONO_MODE_OFF;
            }
        } catch (Exception e) {
            // Deliberate best-effort: any native failure is reported as OFF, not thrown.
            return DUAL_MONO_MODE_OFF;
        }
        return dualMonoMode[0];
    }
1588 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and
    // AudioSystem.OUT_CHANNEL_COUNT_MAX
    // (see isMultichannelConfigSupported() for the enforcement of those rules)
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2;
1617 
1618     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1619     // power saving to be automatically enabled for an AudioTrack. Returns false if
1620     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1621     private static boolean shouldEnablePowerSaving(
1622             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1623             int bufferSizeInBytes, int mode) {
1624         // If no attributes, OK
1625         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1626         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1627         // FLAG_DEEP_BUFFER because if set the request is explicit and
1628         // shouldEnablePowerSaving() should return false.
1629         final int flags = attributes.getAllFlags()
1630                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1631                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1632 
1633         if (attributes != null &&
1634                 (flags != 0  // cannot have any special flags
1635                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1636                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1637                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1638                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1639             return false;
1640         }
1641 
1642         // Format must be fully specified and be linear pcm
1643         if (format == null
1644                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1645                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1646                 || !AudioFormat.isValidEncoding(format.getEncoding())
1647                 || format.getChannelCount() < 1) {
1648             return false;
1649         }
1650 
1651         // Mode must be streaming
1652         if (mode != MODE_STREAM) {
1653             return false;
1654         }
1655 
1656         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1657         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1658         if (bufferSizeInBytes != 0) {
1659             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1660             final int MILLIS_PER_SECOND = 1000;
1661             final long bufferTargetSize =
1662                     BUFFER_TARGET_MODE_STREAM_MS
1663                     * format.getChannelCount()
1664                     * format.getBytesPerSample(format.getEncoding())
1665                     * format.getSampleRate()
1666                     / MILLIS_PER_SECOND;
1667             if (bufferSizeInBytes < bufferTargetSize) {
1668                 return false;
1669             }
1670         }
1671 
1672         return true;
1673     }
1674 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is allowed through so the route can pick a default rate.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is warned about (not rejected) for channel counts other than 2 or 8.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO
                && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) {
            Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig
                    + ", which is not 2 or 8 channels");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
                throw new IllegalArgumentException(
                        "Unsupported channel mask configuration " + channelConfig
                        + " for encoding " + audioFormat);
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // As of S, we accept up to 24 channel index mask.
            final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1;
            final int channelIndexCount = Integer.bitCount(channelIndexMask);
            // PCM is further limited to OUT_CHANNEL_COUNT_MAX channels; compressed is not.
            final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0
                    && (!AudioFormat.isEncodingLinearFrames(audioFormat)  // compressed OK
                            || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM
            if (!accepted) {
                throw new IllegalArgumentException(
                        "Unsupported channel index mask configuration " + channelIndexMask
                        + " for encoding " + audioFormat);
            }
            // When both a position mask and an index mask are given, their counts must agree.
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1771 
1772     // General pair map
1773     private static final HashMap<String, Integer> CHANNEL_PAIR_MAP = new HashMap<>() {{
1774         put("front", AudioFormat.CHANNEL_OUT_FRONT_LEFT
1775                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT);
1776         put("back", AudioFormat.CHANNEL_OUT_BACK_LEFT
1777                 | AudioFormat.CHANNEL_OUT_BACK_RIGHT);
1778         put("front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER
1779                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
1780         put("side", AudioFormat.CHANNEL_OUT_SIDE_LEFT
1781                 | AudioFormat.CHANNEL_OUT_SIDE_RIGHT);
1782         put("top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT
1783                 | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT);
1784         put("top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT
1785                 | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT);
1786         put("top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT
1787                 | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT);
1788         put("bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT
1789                 | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT);
1790     }};
1791 
1792     /**
1793      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1794      * @param channelConfig the mask to validate
1795      * @return false if the AudioTrack can't be used with such a mask
1796      */
isMultichannelConfigSupported(int channelConfig, int encoding)1797     private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) {
1798         // check for unsupported channels
1799         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1800             loge("Channel configuration features unsupported channels");
1801             return false;
1802         }
1803         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1804         final int channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding)
1805                 ? AudioSystem.OUT_CHANNEL_COUNT_MAX  // PCM limited to OUT_CHANNEL_COUNT_MAX
1806                 : AudioSystem.FCC_24;                // Compressed limited to 24 channels
1807         if (channelCount > channelCountLimit) {
1808             loge("Channel configuration contains too many channels for encoding "
1809                     + encoding + "(" + channelCount + " > " + channelCountLimit + ")");
1810             return false;
1811         }
1812         // check for unsupported multichannel combinations:
1813         // - FL/FR must be present
1814         // - L/R channels must be paired (e.g. no single L channel)
1815         final int frontPair =
1816                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1817         if ((channelConfig & frontPair) != frontPair) {
1818                 loge("Front channels must be present in multichannel configurations");
1819                 return false;
1820         }
1821         // Check all pairs to see that they are matched (front duplicated here).
1822         for (HashMap.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) {
1823             final int positionPair = e.getValue();
1824             if ((channelConfig & positionPair) != 0
1825                     && (channelConfig & positionPair) != positionPair) {
1826                 loge("Channel pair (" + e.getKey() + ") cannot be used independently");
1827                 return false;
1828             }
1829         }
1830         return true;
1831     }
1832 
1833 
1834     // Convenience method for the constructor's audio buffer size check.
1835     // preconditions:
1836     //    mChannelCount is valid
1837     //    mAudioFormat is valid
1838     // postcondition:
1839     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1840     private void audioBuffSizeCheck(int audioBufferSize) {
1841         // NB: this section is only valid with PCM or IEC61937 data.
1842         //     To update when supporting compressed formats
1843         int frameSizeInBytes;
1844         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1845             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1846         } else {
1847             frameSizeInBytes = 1;
1848         }
1849         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1850             throw new IllegalArgumentException("Invalid audio buffer size.");
1851         }
1852 
1853         mNativeBufferSizeInBytes = audioBufferSize;
1854         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1855     }
1856 
1857 
    /**
     * Releases the native AudioTrack resources.
     */
    public void release() {
        // Tear down stream event handling first so no callbacks fire during release.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();
        native_release();
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            // Wake any thread blocked waiting for a play-state transition.
            mPlayStateLock.notify();
        }
    }
1880 
    @Override
    protected void finalize() {
        // Finalizer safety net: unregister the native routing callback and release
        // native resources in case the app never called release().
        tryToDisableNativeRoutingCallback();
        baseRelease();
        native_finalize();
    }
1887 
1888     //--------------------------------------------------------------------------
1889     // Getters
1890     //--------------------
1891     /**
1892      * Returns the minimum gain value, which is the constant 0.0.
1893      * Gain values less than 0.0 will be clamped to 0.0.
1894      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1895      * @return the minimum value, which is the constant 0.0.
1896      */
getMinVolume()1897     static public float getMinVolume() {
1898         return GAIN_MIN;
1899     }
1900 
1901     /**
1902      * Returns the maximum gain value, which is greater than or equal to 1.0.
1903      * Gain values greater than the maximum will be clamped to the maximum.
1904      * <p>The word "volume" in the API name is historical; this is actually a gain.
1905      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1906      * corresponds to a gain of 0 dB (sample values left unmodified).
1907      * @return the maximum value, which is greater than or equal to 1.0.
1908      */
getMaxVolume()1909     static public float getMaxVolume() {
1910         return GAIN_MAX;
1911     }
1912 
    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source [sic].
     * @return the source sample rate in Hz.
     */
    public int getSampleRate() {
        return mSampleRate;
    }
1925 
1926     /**
1927      * Returns the current playback sample rate rate in Hz.
1928      */
getPlaybackRate()1929     public int getPlaybackRate() {
1930         return native_get_playback_rate();
1931     }
1932 
1933     /**
1934      * Returns the current playback parameters.
1935      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
1936      * @return current {@link PlaybackParams}.
1937      * @throws IllegalStateException if track is not initialized.
1938      */
getPlaybackParams()1939     public @NonNull PlaybackParams getPlaybackParams() {
1940         return native_get_playback_params();
1941     }
1942 
1943     /**
1944      * Returns the {@link AudioAttributes} used in configuration.
1945      * If a {@code streamType} is used instead of an {@code AudioAttributes}
1946      * to configure the AudioTrack
1947      * (the use of {@code streamType} for configuration is deprecated),
1948      * then the {@code AudioAttributes}
1949      * equivalent to the {@code streamType} is returned.
1950      * @return The {@code AudioAttributes} used to configure the AudioTrack.
1951      * @throws IllegalStateException If the track is not initialized.
1952      */
getAudioAttributes()1953     public @NonNull AudioAttributes getAudioAttributes() {
1954         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
1955             throw new IllegalStateException("track not initialized");
1956         }
1957         return mConfiguredAudioAttributes;
1958     }
1959 
1960     /**
1961      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
1962      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1963      */
getAudioFormat()1964     public int getAudioFormat() {
1965         return mAudioFormat;
1966     }
1967 
1968     /**
1969      * Returns the volume stream type of this AudioTrack.
1970      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
1971      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
1972      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
1973      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
1974      * {@link AudioManager#STREAM_ACCESSIBILITY}.
1975      */
getStreamType()1976     public int getStreamType() {
1977         return mStreamType;
1978     }
1979 
1980     /**
1981      * Returns the configured channel position mask.
1982      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
1983      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
1984      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
1985      * a channel index mask was used. Consider
1986      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
1987      * which contains both the channel position mask and the channel index mask.
1988      */
getChannelConfiguration()1989     public int getChannelConfiguration() {
1990         return mChannelConfiguration;
1991     }
1992 
1993     /**
1994      * Returns the configured <code>AudioTrack</code> format.
1995      * @return an {@link AudioFormat} containing the
1996      * <code>AudioTrack</code> parameters at the time of configuration.
1997      */
getFormat()1998     public @NonNull AudioFormat getFormat() {
1999         AudioFormat.Builder builder = new AudioFormat.Builder()
2000             .setSampleRate(mSampleRate)
2001             .setEncoding(mAudioFormat);
2002         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
2003             builder.setChannelMask(mChannelConfiguration);
2004         }
2005         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
2006             builder.setChannelIndexMask(mChannelIndexMask);
2007         }
2008         return builder.build();
2009     }
2010 
2011     /**
2012      * Returns the configured number of channels.
2013      */
getChannelCount()2014     public int getChannelCount() {
2015         return mChannelCount;
2016     }
2017 
2018     /**
2019      * Returns the state of the AudioTrack instance. This is useful after the
2020      * AudioTrack instance has been created to check if it was initialized
2021      * properly. This ensures that the appropriate resources have been acquired.
2022      * @see #STATE_UNINITIALIZED
2023      * @see #STATE_INITIALIZED
2024      * @see #STATE_NO_STATIC_DATA
2025      */
getState()2026     public int getState() {
2027         return mState;
2028     }
2029 
2030     /**
2031      * Returns the playback state of the AudioTrack instance.
2032      * @see #PLAYSTATE_STOPPED
2033      * @see #PLAYSTATE_PAUSED
2034      * @see #PLAYSTATE_PLAYING
2035      */
getPlayState()2036     public int getPlayState() {
2037         synchronized (mPlayStateLock) {
2038             switch (mPlayState) {
2039                 case PLAYSTATE_STOPPING:
2040                     return PLAYSTATE_PLAYING;
2041                 case PLAYSTATE_PAUSED_STOPPING:
2042                     return PLAYSTATE_PAUSED;
2043                 default:
2044                     return mPlayState;
2045             }
2046         }
2047     }
2048 
2049 
2050     /**
2051      * Returns the effective size of the <code>AudioTrack</code> buffer
2052      * that the application writes to.
2053      * <p> This will be less than or equal to the result of
2054      * {@link #getBufferCapacityInFrames()}.
2055      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
2056      * <p> If the track is subsequently routed to a different output sink, the buffer
2057      * size and capacity may enlarge to accommodate.
2058      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2059      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2060      * the size of the <code>AudioTrack</code> buffer in bytes.
2061      * <p> See also {@link AudioManager#getProperty(String)} for key
2062      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2063      * @return current size in frames of the <code>AudioTrack</code> buffer.
2064      * @throws IllegalStateException if track is not initialized.
2065      */
getBufferSizeInFrames()2066     public @IntRange (from = 0) int getBufferSizeInFrames() {
2067         return native_get_buffer_size_frames();
2068     }
2069 
2070     /**
2071      * Limits the effective size of the <code>AudioTrack</code> buffer
2072      * that the application writes to.
2073      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2074      * If a blocking write is used then the write will block until the data
2075      * can fit within this limit.
2076      * <p>Changing this limit modifies the latency associated with
2077      * the buffer for this track. A smaller size will give lower latency
2078      * but there may be more glitches due to buffer underruns.
2079      * <p>The actual size used may not be equal to this requested size.
2080      * It will be limited to a valid range with a maximum of
2081      * {@link #getBufferCapacityInFrames()}.
2082      * It may also be adjusted slightly for internal reasons.
2083      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2084      * will be returned.
2085      * <p>This method is only supported for PCM audio.
2086      * It is not supported for compressed audio tracks.
2087      *
2088      * @param bufferSizeInFrames requested buffer size in frames
2089      * @return the actual buffer size in frames or an error code,
2090      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2091      * @throws IllegalStateException if track is not initialized.
2092      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2093     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2094         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2095             return ERROR_INVALID_OPERATION;
2096         }
2097         if (bufferSizeInFrames < 0) {
2098             return ERROR_BAD_VALUE;
2099         }
2100         return native_set_buffer_size_frames(bufferSizeInFrames);
2101     }
2102 
2103     /**
2104      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
2105      *  <p> If the track's creation mode is {@link #MODE_STATIC},
2106      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
2107      *  A static track's frame count will not change.
2108      *  <p> If the track's creation mode is {@link #MODE_STREAM},
2109      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
2110      *  For streaming tracks, this value may be rounded up to a larger value if needed by
2111      *  the target output sink, and
2112      *  if the track is subsequently routed to a different output sink, the
2113      *  frame count may enlarge to accommodate.
2114      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2115      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2116      *  the size of the <code>AudioTrack</code> buffer in bytes.
2117      *  <p> See also {@link AudioManager#getProperty(String)} for key
2118      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2119      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
2120      *  @throws IllegalStateException if track is not initialized.
2121      */
getBufferCapacityInFrames()2122     public @IntRange (from = 0) int getBufferCapacityInFrames() {
2123         return native_get_buffer_capacity_frames();
2124     }
2125 
2126     /**
2127      * Sets the streaming start threshold for an <code>AudioTrack</code>.
2128      * <p> The streaming start threshold is the buffer level that the written audio
2129      * data must reach for audio streaming to start after {@link #play()} is called.
2130      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2131      *
2132      * @param startThresholdInFrames the desired start threshold.
2133      * @return the actual start threshold in frames value. This is
2134      *         an integer between 1 to the buffer capacity
2135      *         (see {@link #getBufferCapacityInFrames()}),
2136      *         and might change if the output sink changes after track creation.
2137      * @throws IllegalStateException if the track is not initialized or the
2138      *         track transfer mode is not {@link #MODE_STREAM}.
2139      * @throws IllegalArgumentException if startThresholdInFrames is not positive.
2140      * @see #getStartThresholdInFrames()
2141      */
setStartThresholdInFrames( @ntRange from = 1) int startThresholdInFrames)2142     public @IntRange(from = 1) int setStartThresholdInFrames(
2143             @IntRange (from = 1) int startThresholdInFrames) {
2144         if (mState != STATE_INITIALIZED) {
2145             throw new IllegalStateException("AudioTrack is not initialized");
2146         }
2147         if (mDataLoadMode != MODE_STREAM) {
2148             throw new IllegalStateException("AudioTrack must be a streaming track");
2149         }
2150         if (startThresholdInFrames < 1) {
2151             throw new IllegalArgumentException("startThresholdInFrames "
2152                     + startThresholdInFrames + " must be positive");
2153         }
2154         return native_setStartThresholdInFrames(startThresholdInFrames);
2155     }
2156 
2157     /**
2158      * Returns the streaming start threshold of the <code>AudioTrack</code>.
2159      * <p> The streaming start threshold is the buffer level that the written audio
2160      * data must reach for audio streaming to start after {@link #play()} is called.
2161      * When an <code>AudioTrack</code> is created, the streaming start threshold
2162      * is the buffer capacity in frames. If the buffer size in frames is reduced
2163      * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold
2164      * then that value will be used instead for the streaming start threshold.
2165      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2166      *
2167      * @return the current start threshold in frames value. This is
2168      *         an integer between 1 to the buffer capacity
2169      *         (see {@link #getBufferCapacityInFrames()}),
2170      *         and might change if the  output sink changes after track creation.
2171      * @throws IllegalStateException if the track is not initialized or the
2172      *         track is not {@link #MODE_STREAM}.
2173      * @see #setStartThresholdInFrames(int)
2174      */
getStartThresholdInFrames()2175     public @IntRange (from = 1) int getStartThresholdInFrames() {
2176         if (mState != STATE_INITIALIZED) {
2177             throw new IllegalStateException("AudioTrack is not initialized");
2178         }
2179         if (mDataLoadMode != MODE_STREAM) {
2180             throw new IllegalStateException("AudioTrack must be a streaming track");
2181         }
2182         return native_getStartThresholdInFrames();
2183     }
2184 
2185     /**
2186      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
2187      *  @return current size in frames of the <code>AudioTrack</code> buffer.
2188      *  @throws IllegalStateException
2189      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
2190      */
2191     @Deprecated
getNativeFrameCount()2192     protected int getNativeFrameCount() {
2193         return native_get_buffer_capacity_frames();
2194     }
2195 
2196     /**
2197      * Returns marker position expressed in frames.
2198      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
2199      * or zero if marker is disabled.
2200      */
getNotificationMarkerPosition()2201     public int getNotificationMarkerPosition() {
2202         return native_get_marker_pos();
2203     }
2204 
2205     /**
2206      * Returns the notification update period expressed in frames.
2207      * Zero means that no position update notifications are being delivered.
2208      */
getPositionNotificationPeriod()2209     public int getPositionNotificationPeriod() {
2210         return native_get_pos_update_period();
2211     }
2212 
2213     /**
2214      * Returns the playback head position expressed in frames.
2215      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
2216      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
2217      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
2218      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
2219      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
2220      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
2221      * the total number of frames played since reset,
2222      * <i>not</i> the current offset within the buffer.
2223      */
getPlaybackHeadPosition()2224     public int getPlaybackHeadPosition() {
2225         return native_get_position();
2226     }
2227 
2228     /**
2229      * Returns this track's estimated latency in milliseconds. This includes the latency due
2230      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
2231      *
2232      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
2233      * a better solution.
2234      * @hide
2235      */
2236     @UnsupportedAppUsage(trackingBug = 130237544)
getLatency()2237     public int getLatency() {
2238         return native_get_latency();
2239     }
2240 
2241     /**
2242      * Returns the number of underrun occurrences in the application-level write buffer
2243      * since the AudioTrack was created.
2244      * An underrun occurs if the application does not write audio
2245      * data quickly enough, causing the buffer to underflow
2246      * and a potential audio glitch or pop.
2247      * <p>
2248      * Underruns are less likely when buffer sizes are large.
2249      * It may be possible to eliminate underruns by recreating the AudioTrack with
2250      * a larger buffer.
2251      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
2252      * effective size of the buffer.
2253      */
getUnderrunCount()2254     public int getUnderrunCount() {
2255         return native_get_underrun_count();
2256     }
2257 
2258     /**
2259      * Returns the current performance mode of the {@link AudioTrack}.
2260      *
2261      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2262      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2263      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2264      * Use {@link AudioTrack.Builder#setPerformanceMode}
2265      * in the {@link AudioTrack.Builder} to enable a performance mode.
2266      * @throws IllegalStateException if track is not initialized.
2267      */
getPerformanceMode()2268     public @PerformanceMode int getPerformanceMode() {
2269         final int flags = native_get_flags();
2270         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2271             return PERFORMANCE_MODE_LOW_LATENCY;
2272         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2273             return PERFORMANCE_MODE_POWER_SAVING;
2274         } else {
2275             return PERFORMANCE_MODE_NONE;
2276         }
2277     }
2278 
2279     /**
2280      *  Returns the output sample rate in Hz for the specified stream type.
2281      */
getNativeOutputSampleRate(int streamType)2282     static public int getNativeOutputSampleRate(int streamType) {
2283         return native_get_output_sample_rate(streamType);
2284     }
2285 
2286     /**
2287      * Returns the estimated minimum buffer size required for an AudioTrack
2288      * object to be created in the {@link #MODE_STREAM} mode.
2289      * The size is an estimate because it does not consider either the route or the sink,
2290      * since neither is known yet.  Note that this size doesn't
2291      * guarantee a smooth playback under load, and higher values should be chosen according to
2292      * the expected frequency at which the buffer will be refilled with additional data to play.
2293      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2294      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2295      * based on the highest planned sample rate.
2296      * @param sampleRateInHz the source sample rate expressed in Hz.
2297      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2298      * @param channelConfig describes the configuration of the audio channels.
2299      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2300      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2301      * @param audioFormat the format in which the audio data is represented.
2302      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2303      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2304      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2305      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2306      *   or {@link #ERROR} if unable to query for output properties,
2307      *   or the minimum buffer size expressed in bytes.
2308      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2309     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2310         int channelCount = 0;
2311         switch(channelConfig) {
2312         case AudioFormat.CHANNEL_OUT_MONO:
2313         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2314             channelCount = 1;
2315             break;
2316         case AudioFormat.CHANNEL_OUT_STEREO:
2317         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2318             channelCount = 2;
2319             break;
2320         default:
2321             if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
2322                 loge("getMinBufferSize(): Invalid channel configuration.");
2323                 return ERROR_BAD_VALUE;
2324             } else {
2325                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2326             }
2327         }
2328 
2329         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2330             loge("getMinBufferSize(): Invalid audio format.");
2331             return ERROR_BAD_VALUE;
2332         }
2333 
2334         // sample rate, note these values are subject to change
2335         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2336         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2337                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2338             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2339             return ERROR_BAD_VALUE;
2340         }
2341 
2342         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2343         if (size <= 0) {
2344             loge("getMinBufferSize(): error querying hardware");
2345             return ERROR;
2346         }
2347         else {
2348             return size;
2349         }
2350     }
2351 
2352     /**
2353      * Returns the audio session ID.
2354      *
2355      * @return the ID of the audio session this AudioTrack belongs to.
2356      */
getAudioSessionId()2357     public int getAudioSessionId() {
2358         return mSessionId;
2359     }
2360 
2361    /**
2362     * Poll for a timestamp on demand.
2363     * <p>
2364     * If you need to track timestamps during initial warmup or after a routing or mode change,
2365     * you should request a new timestamp periodically until the reported timestamps
2366     * show that the frame position is advancing, or until it becomes clear that
2367     * timestamps are unavailable for this route.
2368     * <p>
2369     * After the clock is advancing at a stable rate,
2370     * query for a new timestamp approximately once every 10 seconds to once per minute.
2371     * Calling this method more often is inefficient.
2372     * It is also counter-productive to call this method more often than recommended,
2373     * because the short-term differences between successive timestamp reports are not meaningful.
2374     * If you need a high-resolution mapping between frame position and presentation time,
2375     * consider implementing that at application level, based on low-resolution timestamps.
2376     * <p>
2377     * The audio data at the returned position may either already have been
2378     * presented, or may have not yet been presented but is committed to be presented.
2379     * It is not possible to request the time corresponding to a particular position,
2380     * or to request the (fractional) position corresponding to a particular time.
2381     * If you need such features, consider implementing them at application level.
2382     *
2383     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2384     *        and owned by caller.
2385     * @return true if a timestamp is available, or false if no timestamp is available.
2386     *         If a timestamp is available,
2387     *         the AudioTimestamp instance is filled in with a position in frame units, together
2388     *         with the estimated time when that frame was presented or is committed to
2389     *         be presented.
2390     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2391     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2392     *         or during and immediately after a route change.
2393     *         A timestamp is permanently unavailable for a given route if the route does not support
2394     *         timestamps.  In this case, the approximate frame position can be obtained
2395     *         using {@link #getPlaybackHeadPosition}.
2396     *         However, it may be useful to continue to query for
2397     *         timestamps occasionally, to recover after a route change.
2398     */
2399     // Add this text when the "on new timestamp" API is added:
2400     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2401     public boolean getTimestamp(AudioTimestamp timestamp)
2402     {
2403         if (timestamp == null) {
2404             throw new IllegalArgumentException();
2405         }
2406         // It's unfortunate, but we have to either create garbage every time or use synchronized
2407         long[] longArray = new long[2];
2408         int ret = native_get_timestamp(longArray);
2409         if (ret != SUCCESS) {
2410             return false;
2411         }
2412         timestamp.framePosition = longArray[0];
2413         timestamp.nanoTime = longArray[1];
2414         return true;
2415     }
2416 
2417     /**
2418      * Poll for a timestamp on demand.
2419      * <p>
2420      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2421      *
2422      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2423      *        and owned by caller.
2424      * @return {@link #SUCCESS} if a timestamp is available
2425      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2426      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2427      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2428      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2429      *         for the timestamp.
2430      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2431      *         needs to be recreated.
2432      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2433      *         timestamps. In this case, the approximate frame position can be obtained
2434      *         using {@link #getPlaybackHeadPosition}.
2435      *
2436      *         The AudioTimestamp instance is filled in with a position in frame units, together
2437      *         with the estimated time when that frame was presented or is committed to
2438      *         be presented.
2439      * @hide
2440      */
2441      // Add this text when the "on new timestamp" API is added:
2442      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2443      public int getTimestampWithStatus(AudioTimestamp timestamp)
2444      {
2445          if (timestamp == null) {
2446              throw new IllegalArgumentException();
2447          }
2448          // It's unfortunate, but we have to either create garbage every time or use synchronized
2449          long[] longArray = new long[2];
2450          int ret = native_get_timestamp(longArray);
2451          timestamp.framePosition = longArray[0];
2452          timestamp.nanoTime = longArray[1];
2453          return ret;
2454      }
2455 
2456     /**
2457      *  Return Metrics data about the current AudioTrack instance.
2458      *
2459      * @return a {@link PersistableBundle} containing the set of attributes and values
2460      * available for the media being handled by this instance of AudioTrack
2461      * The attributes are descibed in {@link MetricsConstants}.
2462      *
2463      * Additional vendor-specific fields may also be present in
2464      * the return value.
2465      */
getMetrics()2466     public PersistableBundle getMetrics() {
2467         PersistableBundle bundle = native_getMetrics();
2468         return bundle;
2469     }
2470 
native_getMetrics()2471     private native PersistableBundle native_getMetrics();
2472 
2473     //--------------------------------------------------------------------------
2474     // Initialization / configuration
2475     //--------------------
2476     /**
2477      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2478      * for each periodic playback head position update.
2479      * Notifications will be received in the same thread as the one in which the AudioTrack
2480      * instance was created.
2481      * @param listener
2482      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)2483     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
2484         setPlaybackPositionUpdateListener(listener, null);
2485     }
2486 
2487     /**
2488      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2489      * for each periodic playback head position update.
2490      * Use this method to receive AudioTrack events in the Handler associated with another
2491      * thread than the one in which you created the AudioTrack instance.
2492      * @param listener
2493      * @param handler the Handler that will receive the event notification messages.
2494      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2495     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2496                                                     Handler handler) {
2497         if (listener != null) {
2498             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2499         } else {
2500             mEventHandlerDelegate = null;
2501         }
2502     }
2503 
2504 
clampGainOrLevel(float gainOrLevel)2505     private static float clampGainOrLevel(float gainOrLevel) {
2506         if (Float.isNaN(gainOrLevel)) {
2507             throw new IllegalArgumentException();
2508         }
2509         if (gainOrLevel < GAIN_MIN) {
2510             gainOrLevel = GAIN_MIN;
2511         } else if (gainOrLevel > GAIN_MAX) {
2512             gainOrLevel = GAIN_MAX;
2513         }
2514         return gainOrLevel;
2515     }
2516 
2517 
2518      /**
2519      * Sets the specified left and right output gain values on the AudioTrack.
2520      * <p>Gain values are clamped to the closed interval [0.0, max] where
2521      * max is the value of {@link #getMaxVolume}.
2522      * A value of 0.0 results in zero gain (silence), and
2523      * a value of 1.0 means unity gain (signal unchanged).
2524      * The default value is 1.0 meaning unity gain.
2525      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2526      * @param leftGain output gain for the left channel.
2527      * @param rightGain output gain for the right channel
2528      * @return error code or success, see {@link #SUCCESS},
2529      *    {@link #ERROR_INVALID_OPERATION}
2530      * @deprecated Applications should use {@link #setVolume} instead, as it
2531      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2532      */
2533     @Deprecated
setStereoVolume(float leftGain, float rightGain)2534     public int setStereoVolume(float leftGain, float rightGain) {
2535         if (mState == STATE_UNINITIALIZED) {
2536             return ERROR_INVALID_OPERATION;
2537         }
2538 
2539         baseSetVolume(leftGain, rightGain);
2540         return SUCCESS;
2541     }
2542 
2543     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2544     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2545         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2546         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2547 
2548         native_setVolume(leftVolume, rightVolume);
2549     }
2550 
2551 
2552     /**
2553      * Sets the specified output gain value on all channels of this track.
2554      * <p>Gain values are clamped to the closed interval [0.0, max] where
2555      * max is the value of {@link #getMaxVolume}.
2556      * A value of 0.0 results in zero gain (silence), and
2557      * a value of 1.0 means unity gain (signal unchanged).
2558      * The default value is 1.0 meaning unity gain.
2559      * <p>This API is preferred over {@link #setStereoVolume}, as it
2560      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2561      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2562      * @param gain output gain for all channels.
2563      * @return error code or success, see {@link #SUCCESS},
2564      *    {@link #ERROR_INVALID_OPERATION}
2565      */
setVolume(float gain)2566     public int setVolume(float gain) {
2567         return setStereoVolume(gain, gain);
2568     }
2569 
2570     @Override
playerApplyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)2571     /* package */ int playerApplyVolumeShaper(
2572             @NonNull VolumeShaper.Configuration configuration,
2573             @NonNull VolumeShaper.Operation operation) {
2574         return native_applyVolumeShaper(configuration, operation);
2575     }
2576 
2577     @Override
playerGetVolumeShaperState(int id)2578     /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
2579         return native_getVolumeShaperState(id);
2580     }
2581 
2582     @Override
createVolumeShaper( @onNull VolumeShaper.Configuration configuration)2583     public @NonNull VolumeShaper createVolumeShaper(
2584             @NonNull VolumeShaper.Configuration configuration) {
2585         return new VolumeShaper(configuration, this);
2586     }
2587 
2588     /**
2589      * Sets the playback sample rate for this track. This sets the sampling rate at which
2590      * the audio data will be consumed and played back
2591      * (as set by the sampleRateInHz parameter in the
2592      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2593      * not the original sampling rate of the
2594      * content. For example, setting it to half the sample rate of the content will cause the
2595      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2596      * The valid sample rate range is from 1 Hz to twice the value returned by
2597      * {@link #getNativeOutputSampleRate(int)}.
2598      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2599      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2600      * for playback of content of differing sample rate,
2601      * but with identical encoding and channel mask.
2602      * @param sampleRateInHz the sample rate expressed in Hz
2603      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2604      *    {@link #ERROR_INVALID_OPERATION}
2605      */
setPlaybackRate(int sampleRateInHz)2606     public int setPlaybackRate(int sampleRateInHz) {
2607         if (mState != STATE_INITIALIZED) {
2608             return ERROR_INVALID_OPERATION;
2609         }
2610         if (sampleRateInHz <= 0) {
2611             return ERROR_BAD_VALUE;
2612         }
2613         return native_set_playback_rate(sampleRateInHz);
2614     }
2615 
2616 
2617     /**
2618      * Sets the playback parameters.
2619      * This method returns failure if it cannot apply the playback parameters.
2620      * One possible cause is that the parameters for speed or pitch are out of range.
2621      * Another possible cause is that the <code>AudioTrack</code> is streaming
2622      * (see {@link #MODE_STREAM}) and the
2623      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
2624      * on configuration must be larger than the speed multiplied by the minimum size
2625      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
2626      * @param params see {@link PlaybackParams}. In particular,
2627      * speed, pitch, and audio mode should be set.
2628      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
2629      * @throws IllegalStateException if track is not initialized.
2630      */
setPlaybackParams(@onNull PlaybackParams params)2631     public void setPlaybackParams(@NonNull PlaybackParams params) {
2632         if (params == null) {
2633             throw new IllegalArgumentException("params is null");
2634         }
2635         native_set_playback_params(params);
2636     }
2637 
2638 
2639     /**
2640      * Sets the position of the notification marker.  At most one marker can be active.
2641      * @param markerInFrames marker position in wrapping frame units similar to
2642      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2643      * To set a marker at a position which would appear as zero due to wraparound,
2644      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2645      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2646      *  {@link #ERROR_INVALID_OPERATION}
2647      */
setNotificationMarkerPosition(int markerInFrames)2648     public int setNotificationMarkerPosition(int markerInFrames) {
2649         if (mState == STATE_UNINITIALIZED) {
2650             return ERROR_INVALID_OPERATION;
2651         }
2652         return native_set_marker_pos(markerInFrames);
2653     }
2654 
2655 
2656     /**
2657      * Sets the period for the periodic notification event.
2658      * @param periodInFrames update period expressed in frames.
2659      * Zero period means no position updates.  A negative period is not allowed.
2660      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2661      */
setPositionNotificationPeriod(int periodInFrames)2662     public int setPositionNotificationPeriod(int periodInFrames) {
2663         if (mState == STATE_UNINITIALIZED) {
2664             return ERROR_INVALID_OPERATION;
2665         }
2666         return native_set_pos_update_period(periodInFrames);
2667     }
2668 
2669 
2670     /**
2671      * Sets the playback head position within the static buffer.
2672      * The track must be stopped or paused for the position to be changed,
2673      * and must use the {@link #MODE_STATIC} mode.
2674      * @param positionInFrames playback head position within buffer, expressed in frames.
2675      * Zero corresponds to start of buffer.
2676      * The position must not be greater than the buffer size in frames, or negative.
2677      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2678      * the position values have different meanings.
2679      * <br>
2680      * If looping is currently enabled and the new position is greater than or equal to the
2681      * loop end marker, the behavior varies by API level:
2682      * as of {@link android.os.Build.VERSION_CODES#M},
2683      * the looping is first disabled and then the position is set.
2684      * For earlier API levels, the behavior is unspecified.
2685      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2686      *    {@link #ERROR_INVALID_OPERATION}
2687      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2688     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2689         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2690                 getPlayState() == PLAYSTATE_PLAYING) {
2691             return ERROR_INVALID_OPERATION;
2692         }
2693         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2694             return ERROR_BAD_VALUE;
2695         }
2696         return native_set_position(positionInFrames);
2697     }
2698 
2699     /**
2700      * Sets the loop points and the loop count. The loop can be infinite.
2701      * Similarly to setPlaybackHeadPosition,
2702      * the track must be stopped or paused for the loop points to be changed,
2703      * and must use the {@link #MODE_STATIC} mode.
2704      * @param startInFrames loop start marker expressed in frames.
2705      * Zero corresponds to start of buffer.
2706      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2707      * @param endInFrames loop end marker expressed in frames.
2708      * The total buffer size in frames corresponds to end of buffer.
2709      * The end marker must not be greater than the buffer size in frames.
2710      * For looping, the end marker must not be less than or equal to the start marker,
2711      * but to disable looping
2712      * it is permitted for start marker, end marker, and loop count to all be 0.
2713      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2714      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2715      * support,
2716      * {@link #ERROR_BAD_VALUE} is returned.
2717      * The loop range is the interval [startInFrames, endInFrames).
2718      * <br>
2719      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2720      * unless it is greater than or equal to the loop end marker, in which case
2721      * it is forced to the loop start marker.
2722      * For earlier API levels, the effect on position is unspecified.
2723      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2724      *    A value of -1 means infinite looping, and 0 disables looping.
2725      *    A value of positive N means to "loop" (go back) N times.  For example,
2726      *    a value of one means to play the region two times in total.
2727      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2728      *    {@link #ERROR_INVALID_OPERATION}
2729      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2730     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2731             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2732         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2733                 getPlayState() == PLAYSTATE_PLAYING) {
2734             return ERROR_INVALID_OPERATION;
2735         }
2736         if (loopCount == 0) {
2737             ;   // explicitly allowed as an exception to the loop region range check
2738         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2739                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2740             return ERROR_BAD_VALUE;
2741         }
2742         return native_set_loop(startInFrames, endInFrames, loopCount);
2743     }
2744 
2745     /**
2746      * Sets the audio presentation.
2747      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2748      * If a multi-stream decoder (MSD) is not present, or the format does not support
2749      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2750      * {@link #ERROR} is returned in case of any other error.
2751      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2752      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2753      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2754      * @throws IllegalArgumentException if the audio presentation is null.
2755      * @throws IllegalStateException if track is not initialized.
2756      */
setPresentation(@onNull AudioPresentation presentation)2757     public int setPresentation(@NonNull AudioPresentation presentation) {
2758         if (presentation == null) {
2759             throw new IllegalArgumentException("audio presentation is null");
2760         }
2761         return native_setPresentation(presentation.getPresentationId(),
2762                 presentation.getProgramId());
2763     }
2764 
2765     /**
2766      * Sets the initialization state of the instance. This method was originally intended to be used
2767      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
2768      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
2769      * @param state the state of the AudioTrack instance
2770      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
2771      */
2772     @Deprecated
setState(int state)2773     protected void setState(int state) {
2774         mState = state;
2775     }
2776 
2777 
2778     //---------------------------------------------------------
2779     // Transport control methods
2780     //--------------------
2781     /**
2782      * Starts playing an AudioTrack.
2783      * <p>
2784      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2785      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2786      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2787      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2788      * play().
2789      * <p>
2790      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2791      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2792      * If you don't call write() first, or if you call write() but with an insufficient amount of
2793      * data, then the track will be in underrun state at play().  In this case,
2794      * playback will not actually start playing until the data path is filled to a
2795      * device-specific minimum level.  This requirement for the path to be filled
2796      * to a minimum level is also true when resuming audio playback after calling stop().
2797      * Similarly the buffer will need to be filled up again after
2798      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2799      * For portability, an application should prime the data path to the maximum allowed
2800      * by writing data until the write() method returns a short transfer count.
2801      * This allows play() to start immediately, and reduces the chance of underrun.
2802      *
2803      * @throws IllegalStateException if the track isn't properly initialized
2804      */
play()2805     public void play()
2806     throws IllegalStateException {
2807         if (mState != STATE_INITIALIZED) {
2808             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
2809         }
2810         //FIXME use lambda to pass startImpl to superclass
2811         final int delay = getStartDelayMs();
2812         if (delay == 0) {
2813             startImpl();
2814         } else {
2815             new Thread() {
2816                 public void run() {
2817                     try {
2818                         Thread.sleep(delay);
2819                     } catch (InterruptedException e) {
2820                         e.printStackTrace();
2821                     }
2822                     baseSetStartDelayMs(0);
2823                     try {
2824                         startImpl();
2825                     } catch (IllegalStateException e) {
2826                         // fail silently for a state exception when it is happening after
2827                         // a delayed start, as the player state could have changed between the
2828                         // call to start() and the execution of startImpl()
2829                     }
2830                 }
2831             }.start();
2832         }
2833     }
2834 
    // Performs the actual start: enables routing callbacks if needed, starts the
    // native track, and advances the Java-side play-state machine.
    // Callers (play()) have already verified mState == STATE_INITIALIZED.
    private void startImpl() {
        // Routing-listener state is guarded by mRoutingChangeListeners; take that
        // lock first, and separately from mPlayStateLock, to match the locking
        // order used elsewhere in this class.
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        synchronized(mPlayStateLock) {
            baseStart(0); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            // PAUSED_STOPPING means stop() was requested while paused: resuming play
            // continues the stop sequence rather than entering normal playback.
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }
2854 
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            // An offloaded track keeps draining after stop: remain in STOPPING until
            // the stream completes (unless a pause-then-stop was already in flight).
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                // Reset A/V sync bookkeeping for the next playback session.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // Wake any thread blocked on mPlayStateLock waiting for the stop to complete.
                mPlayStateLock.notify();
            }
        }
        tryToDisableNativeRoutingCallback();
    }
2885 
    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // pause playback
        synchronized(mPlayStateLock) {
            native_pause();
            basePause();
            // If a stop was already in progress, remember that it was interrupted by a
            // pause (PAUSED_STOPPING) so a later play() can resume the stop sequence.
            if (mPlayState == PLAYSTATE_STOPPING) {
                mPlayState = PLAYSTATE_PAUSED_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PAUSED;
            }
        }
    }
2910 
2911 
2912     //---------------------------------------------------------
2913     // Audio data supply
2914     //--------------------
2915 
2916     /**
2917      * Flushes the audio data currently queued for playback. Any data that has
2918      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2919      * or if the track's creation mode is not {@link #MODE_STREAM}.
2920      * <BR> Note that although data written but not yet presented is discarded, there is no
2921      * guarantee that all of the buffer space formerly used by that data
2922      * is available for a subsequent write.
2923      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2924      * less than or equal to the total buffer size
2925      * may return a short actual transfer count.
2926      */
flush()2927     public void flush() {
2928         if (mState == STATE_INITIALIZED) {
2929             // flush the data in native layer
2930             native_flush();
2931             mAvSyncHeader = null;
2932             mAvSyncBytesRemaining = 0;
2933         }
2934 
2935     }
2936 
2937     /**
2938      * Writes the audio data to the audio sink for playback (streaming mode),
2939      * or copies audio data for later playback (static buffer mode).
2940      * The format specified in the AudioTrack constructor should be
2941      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2942      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2943      * <p>
2944      * In streaming mode, the write will normally block until all the data has been enqueued for
2945      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2946      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2947      * occurs during the write, then the write may return a short transfer count.
2948      * <p>
2949      * In static buffer mode, copies the data to the buffer starting at offset 0.
2950      * Note that the actual playback of this data might occur after this function returns.
2951      *
2952      * @param audioData the array that holds the data to play.
2953      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2954      *    starts.
2955      *    Must not be negative, or cause the data access to go out of bounds of the array.
2956      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2957      *    Must not be negative, or cause the data access to go out of bounds of the array.
2958      * @return zero or the positive number of bytes that were written, or one of the following
2959      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2960      *    not to exceed sizeInBytes.
2961      * <ul>
2962      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2963      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2964      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2965      *    needs to be recreated. The dead object error code is not returned if some data was
2966      *    successfully transferred. In this case, the error is returned at the next write()</li>
2967      * <li>{@link #ERROR} in case of other error</li>
2968      * </ul>
2969      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
2970      * set to  {@link #WRITE_BLOCKING}.
2971      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes)2972     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
2973         return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
2974     }
2975 
    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
     * <p>
     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
     * for playback, and will return a full transfer count.  However, if the write mode is
     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
     * interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0,
     * and the write mode is ignored.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
     *    starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInBytes the number of bytes to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
     *     effect in static mode.
     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes. The number of bytes will be a multiple of the frame size in bytes
     *    not to exceed sizeInBytes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
            @WriteMode int writeMode) {
        // Note: we allow writes of extended integers and compressed formats from a byte array.
        // Float PCM must go through the float[] overload, hence the explicit rejection here.
        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Both offset and size are known non-negative past the first two tests,
        // so a negative sum can only mean int overflow of offset + size.
        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }

        // For offloaded tracks a pending end-of-stream may require waiting (blocking mode)
        // or returning 0 immediately (non-blocking mode) before new data is accepted.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
                writeMode == WRITE_BLOCKING);

        // The first successful write to a static track completes its initialization.
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (ret > 0)) {
            // benign race with respect to other APIs that read mState
            mState = STATE_INITIALIZED;
        }

        return ret;
    }
3052 
3053     /**
3054      * Writes the audio data to the audio sink for playback (streaming mode),
3055      * or copies audio data for later playback (static buffer mode).
3056      * The format specified in the AudioTrack constructor should be
3057      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3058      * <p>
3059      * In streaming mode, the write will normally block until all the data has been enqueued for
3060      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3061      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3062      * occurs during the write, then the write may return a short transfer count.
3063      * <p>
3064      * In static buffer mode, copies the data to the buffer starting at offset 0.
3065      * Note that the actual playback of this data might occur after this function returns.
3066      *
3067      * @param audioData the array that holds the data to play.
3068      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
3069      *     starts.
3070      *    Must not be negative, or cause the data access to go out of bounds of the array.
3071      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3072      *    Must not be negative, or cause the data access to go out of bounds of the array.
3073      * @return zero or the positive number of shorts that were written, or one of the following
3074      *    error codes. The number of shorts will be a multiple of the channel count not to
3075      *    exceed sizeInShorts.
3076      * <ul>
3077      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3078      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3079      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3080      *    needs to be recreated. The dead object error code is not returned if some data was
3081      *    successfully transferred. In this case, the error is returned at the next write()</li>
3082      * <li>{@link #ERROR} in case of other error</li>
3083      * </ul>
3084      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
3085      * set to  {@link #WRITE_BLOCKING}.
3086      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)3087     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
3088         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
3089     }
3090 
3091     /**
3092      * Writes the audio data to the audio sink for playback (streaming mode),
3093      * or copies audio data for later playback (static buffer mode).
3094      * The format specified in the AudioTrack constructor should be
3095      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3096      * <p>
3097      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3098      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3099      * for playback, and will return a full transfer count.  However, if the write mode is
3100      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3101      * interrupts the write by calling stop or pause, or an I/O error
3102      * occurs during the write, then the write may return a short transfer count.
3103      * <p>
3104      * In static buffer mode, copies the data to the buffer starting at offset 0.
3105      * Note that the actual playback of this data might occur after this function returns.
3106      *
3107      * @param audioData the array that holds the data to write.
3108      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
3109      *     starts.
3110      *    Must not be negative, or cause the data access to go out of bounds of the array.
3111      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3112      *    Must not be negative, or cause the data access to go out of bounds of the array.
3113      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3114      *     effect in static mode.
3115      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3116      *         to the audio sink.
3117      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3118      *     queuing as much audio data for playback as possible without blocking.
3119      * @return zero or the positive number of shorts that were written, or one of the following
3120      *    error codes. The number of shorts will be a multiple of the channel count not to
3121      *    exceed sizeInShorts.
3122      * <ul>
3123      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3124      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3125      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3126      *    needs to be recreated. The dead object error code is not returned if some data was
3127      *    successfully transferred. In this case, the error is returned at the next write()</li>
3128      * <li>{@link #ERROR} in case of other error</li>
3129      * </ul>
3130      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3131     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3132             @WriteMode int writeMode) {
3133 
3134         if (mState == STATE_UNINITIALIZED
3135                 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT
3136                 // use ByteBuffer or byte[] instead for later encodings
3137                 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) {
3138             return ERROR_INVALID_OPERATION;
3139         }
3140 
3141         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3142             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3143             return ERROR_BAD_VALUE;
3144         }
3145 
3146         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3147                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3148                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3149             return ERROR_BAD_VALUE;
3150         }
3151 
3152         if (!blockUntilOffloadDrain(writeMode)) {
3153             return 0;
3154         }
3155 
3156         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3157                 writeMode == WRITE_BLOCKING);
3158 
3159         if ((mDataLoadMode == MODE_STATIC)
3160                 && (mState == STATE_NO_STATIC_DATA)
3161                 && (ret > 0)) {
3162             // benign race with respect to other APIs that read mState
3163             mState = STATE_INITIALIZED;
3164         }
3165 
3166         return ret;
3167     }
3168 
3169     /**
3170      * Writes the audio data to the audio sink for playback (streaming mode),
3171      * or copies audio data for later playback (static buffer mode).
3172      * The format specified in the AudioTrack constructor should be
3173      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3174      * <p>
3175      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3176      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3177      * for playback, and will return a full transfer count.  However, if the write mode is
3178      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3179      * interrupts the write by calling stop or pause, or an I/O error
3180      * occurs during the write, then the write may return a short transfer count.
3181      * <p>
3182      * In static buffer mode, copies the data to the buffer starting at offset 0,
3183      * and the write mode is ignored.
3184      * Note that the actual playback of this data might occur after this function returns.
3185      *
3186      * @param audioData the array that holds the data to write.
3187      *     The implementation does not clip for sample values within the nominal range
3188      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3189      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3190      *     that could add energy, such as reverb.  For the convenience of applications
3191      *     that compute samples using filters with non-unity gain,
3192      *     sample values +3 dB beyond the nominal range are permitted.
3193      *     However such values may eventually be limited or clipped, depending on various gains
3194      *     and later processing in the audio path.  Therefore applications are encouraged
3195      *     to provide samples values within the nominal range.
3196      * @param offsetInFloats the offset, expressed as a number of floats,
3197      *     in audioData where the data to write starts.
3198      *    Must not be negative, or cause the data access to go out of bounds of the array.
3199      * @param sizeInFloats the number of floats to write in audioData after the offset.
3200      *    Must not be negative, or cause the data access to go out of bounds of the array.
3201      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3202      *     effect in static mode.
3203      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3204      *         to the audio sink.
3205      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3206      *     queuing as much audio data for playback as possible without blocking.
3207      * @return zero or the positive number of floats that were written, or one of the following
3208      *    error codes. The number of floats will be a multiple of the channel count not to
3209      *    exceed sizeInFloats.
3210      * <ul>
3211      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3212      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3213      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3214      *    needs to be recreated. The dead object error code is not returned if some data was
3215      *    successfully transferred. In this case, the error is returned at the next write()</li>
3216      * <li>{@link #ERROR} in case of other error</li>
3217      * </ul>
3218      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3219     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3220             @WriteMode int writeMode) {
3221 
3222         if (mState == STATE_UNINITIALIZED) {
3223             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3224             return ERROR_INVALID_OPERATION;
3225         }
3226 
3227         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3228             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3229             return ERROR_INVALID_OPERATION;
3230         }
3231 
3232         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3233             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3234             return ERROR_BAD_VALUE;
3235         }
3236 
3237         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3238                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3239                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3240             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3241             return ERROR_BAD_VALUE;
3242         }
3243 
3244         if (!blockUntilOffloadDrain(writeMode)) {
3245             return 0;
3246         }
3247 
3248         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3249                 writeMode == WRITE_BLOCKING);
3250 
3251         if ((mDataLoadMode == MODE_STATIC)
3252                 && (mState == STATE_NO_STATIC_DATA)
3253                 && (ret > 0)) {
3254             // benign race with respect to other APIs that read mState
3255             mState = STATE_INITIALIZED;
3256         }
3257 
3258         return ret;
3259     }
3260 
3261 
3262     /**
3263      * Writes the audio data to the audio sink for playback (streaming mode),
3264      * or copies audio data for later playback (static buffer mode).
3265      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3266      * <p>
3267      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3268      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3269      * for playback, and will return a full transfer count.  However, if the write mode is
3270      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3271      * interrupts the write by calling stop or pause, or an I/O error
3272      * occurs during the write, then the write may return a short transfer count.
3273      * <p>
3274      * In static buffer mode, copies the data to the buffer starting at offset 0,
3275      * and the write mode is ignored.
3276      * Note that the actual playback of this data might occur after this function returns.
3277      *
3278      * @param audioData the buffer that holds the data to write, starting at the position reported
3279      *     by <code>audioData.position()</code>.
3280      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3281      *     have been advanced to reflect the amount of data that was successfully written to
3282      *     the AudioTrack.
3283      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3284      *     that the number of bytes requested be a multiple of the frame size (sample size in
3285      *     bytes multiplied by the channel count).
3286      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3287      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3288      *     effect in static mode.
3289      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3290      *         to the audio sink.
3291      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3292      *     queuing as much audio data for playback as possible without blocking.
3293      * @return zero or the positive number of bytes that were written, or one of the following
3294      *    error codes.
3295      * <ul>
3296      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3297      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3298      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3299      *    needs to be recreated. The dead object error code is not returned if some data was
3300      *    successfully transferred. In this case, the error is returned at the next write()</li>
3301      * <li>{@link #ERROR} in case of other error</li>
3302      * </ul>
3303      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3304     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3305             @WriteMode int writeMode) {
3306 
3307         if (mState == STATE_UNINITIALIZED) {
3308             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3309             return ERROR_INVALID_OPERATION;
3310         }
3311 
3312         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3313             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3314             return ERROR_BAD_VALUE;
3315         }
3316 
3317         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3318             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3319             return ERROR_BAD_VALUE;
3320         }
3321 
3322         if (!blockUntilOffloadDrain(writeMode)) {
3323             return 0;
3324         }
3325 
3326         int ret = 0;
3327         if (audioData.isDirect()) {
3328             ret = native_write_native_bytes(audioData,
3329                     audioData.position(), sizeInBytes, mAudioFormat,
3330                     writeMode == WRITE_BLOCKING);
3331         } else {
3332             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3333                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3334                     sizeInBytes, mAudioFormat,
3335                     writeMode == WRITE_BLOCKING);
3336         }
3337 
3338         if ((mDataLoadMode == MODE_STATIC)
3339                 && (mState == STATE_NO_STATIC_DATA)
3340                 && (ret > 0)) {
3341             // benign race with respect to other APIs that read mState
3342             mState = STATE_INITIALIZED;
3343         }
3344 
3345         if (ret > 0) {
3346             audioData.position(audioData.position() + ret);
3347         }
3348 
3349         return ret;
3350     }
3351 
    /**
     * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
     * The blocking behavior will depend on the write mode.
     * @param audioData the buffer that holds the data to write, starting at the position reported
     *     by <code>audioData.position()</code>.
     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
     *     have been advanced to reflect the amount of data that was successfully written to
     *     the AudioTrack.
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
     *     provided audioData.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for continuously streamed data.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Tracks without HW A/V sync ignore the timestamp and take the plain write path.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // An offloaded track that is draining accepts no data in non-blocking mode.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // Lazily create the AV sync header buffer (mOffset bytes, big-endian).
        // 0x55550002 is the header's marker/version word expected by the lower layers
        // — NOTE(review): format assumed from the fixed layout below; confirm against HAL docs.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new access unit: fill in the header fields at their fixed offsets —
        // payload size at 4, timestamp at 8, header length at 16 — and rewind the header
        // so it is (re)sent ahead of the payload.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // Write the timestamp header first if it has not been completely written already
        // (a previous non-blocking call may have sent only part of it).
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                // Reset the sync state so the next write starts a fresh access unit.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header still incomplete: report 0 payload bytes written; the caller
                // retries with the same data and we resume the header next time.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // Write the audio payload, capped at what remains of the current access unit.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3459 
3460 
3461     /**
3462      * Sets the playback head position within the static buffer to zero,
3463      * that is it rewinds to start of static buffer.
3464      * The track must be stopped or paused, and
3465      * the track's creation mode must be {@link #MODE_STATIC}.
3466      * <p>
3467      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3468      * {@link #getPlaybackHeadPosition()} to zero.
3469      * For earlier API levels, the reset behavior is unspecified.
3470      * <p>
3471      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3472      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3473      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3474      *  {@link #ERROR_INVALID_OPERATION}
3475      */
reloadStaticData()3476     public int reloadStaticData() {
3477         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3478             return ERROR_INVALID_OPERATION;
3479         }
3480         return native_reload_static();
3481     }
3482 
3483     /**
3484      * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
3485      * received if blocking write or return with 0 frames written if non blocking mode.
3486      */
blockUntilOffloadDrain(int writeMode)3487     private boolean blockUntilOffloadDrain(int writeMode) {
3488         synchronized (mPlayStateLock) {
3489             while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
3490                 if (writeMode == WRITE_NON_BLOCKING) {
3491                     return false;
3492                 }
3493                 try {
3494                     mPlayStateLock.wait();
3495                 } catch (InterruptedException e) {
3496                 }
3497             }
3498             return true;
3499         }
3500     }
3501 
3502     //--------------------------------------------------------------------------
3503     // Audio effects management
3504     //--------------------
3505 
3506     /**
3507      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3508      * effect is a reverberation effect which can be applied on any sound source
3509      * that directs a certain amount of its energy to this effect. This amount
3510      * is defined by setAuxEffectSendLevel().
3511      * {@see #setAuxEffectSendLevel(float)}.
3512      * <p>After creating an auxiliary effect (e.g.
3513      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3514      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3515      * this method to attach the audio track to the effect.
3516      * <p>To detach the effect from the audio track, call this method with a
3517      * null effect id.
3518      *
3519      * @param effectId system wide unique id of the effect to attach
3520      * @return error code or success, see {@link #SUCCESS},
3521      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3522      */
attachAuxEffect(int effectId)3523     public int attachAuxEffect(int effectId) {
3524         if (mState == STATE_UNINITIALIZED) {
3525             return ERROR_INVALID_OPERATION;
3526         }
3527         return native_attachAuxEffect(effectId);
3528     }
3529 
3530     /**
3531      * Sets the send level of the audio track to the attached auxiliary effect
3532      * {@link #attachAuxEffect(int)}.  Effect levels
3533      * are clamped to the closed interval [0.0, max] where
3534      * max is the value of {@link #getMaxVolume}.
3535      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3536      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3537      * this method must be called for the effect to be applied.
3538      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3539      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3540      * so an appropriate conversion from linear UI input x to level is:
3541      * x == 0 -&gt; level = 0
3542      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3543      *
3544      * @param level linear send level
3545      * @return error code or success, see {@link #SUCCESS},
3546      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3547      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3548     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3549         if (mState == STATE_UNINITIALIZED) {
3550             return ERROR_INVALID_OPERATION;
3551         }
3552         return baseSetAuxEffectSendLevel(level);
3553     }
3554 
3555     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3556     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3557         level = clampGainOrLevel(muting ? 0.0f : level);
3558         int err = native_setAuxEffectSendLevel(level);
3559         return err == 0 ? SUCCESS : ERROR;
3560     }
3561 
    //--------------------------------------------------------------------------
    // Explicit Routing
    //--------------------
    // Device last set via setPreferredDevice(); reads/writes are guarded by "this".
    private AudioDeviceInfo mPreferredDevice = null;
3566 
3567     /**
3568      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3569      * the output from this AudioTrack.
3570      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3571      *  If deviceInfo is null, default routing is restored.
3572      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3573      * does not correspond to a valid audio output device.
3574      */
3575     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3576     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3577         // Do some validation....
3578         if (deviceInfo != null && !deviceInfo.isSink()) {
3579             return false;
3580         }
3581         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3582         boolean status = native_setOutputDevice(preferredDeviceId);
3583         if (status == true) {
3584             synchronized (this) {
3585                 mPreferredDevice = deviceInfo;
3586             }
3587         }
3588         return status;
3589     }
3590 
    /**
     * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
     * is not guaranteed to correspond to the actual device being used for playback.
     */
    @Override
    public AudioDeviceInfo getPreferredDevice() {
        synchronized (this) {
            // Synchronized against the writer in setPreferredDevice().
            return mPreferredDevice;
        }
    }
3601 
3602     /**
3603      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3604      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3605      * <code>getRoutedDevice()</code> will return null.
3606      */
3607     @Override
getRoutedDevice()3608     public AudioDeviceInfo getRoutedDevice() {
3609         int deviceId = native_getRoutedDeviceId();
3610         if (deviceId == 0) {
3611             return null;
3612         }
3613         return AudioManager.getDeviceForPortId(deviceId, AudioManager.GET_DEVICES_OUTPUTS);
3614     }
3615 
tryToDisableNativeRoutingCallback()3616     private void tryToDisableNativeRoutingCallback() {
3617         synchronized (mRoutingChangeListeners) {
3618             if (mEnableSelfRoutingMonitor) {
3619                 mEnableSelfRoutingMonitor = false;
3620                 testDisableNativeRoutingCallbacksLocked();
3621             }
3622         }
3623     }
3624 
3625     /**
3626      * Call BEFORE adding a routing callback handler and when enabling self routing listener
3627      * @return returns true for success, false otherwise.
3628      */
3629     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3630     private boolean testEnableNativeRoutingCallbacksLocked() {
3631         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3632             try {
3633                 native_enableDeviceCallback();
3634                 return true;
3635             } catch (IllegalStateException e) {
3636                 if (Log.isLoggable(TAG, Log.DEBUG)) {
3637                     Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
3638                 }
3639             }
3640         }
3641         return false;
3642     }
3643 
3644     /*
3645      * Call AFTER removing a routing callback handler and when disabling self routing listener.
3646      */
3647     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3648     private void testDisableNativeRoutingCallbacksLocked() {
3649         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3650             try {
3651                 native_disableDeviceCallback();
3652             } catch (IllegalStateException e) {
3653                 // Fail silently as track state could have changed in between stop
3654                 // and disabling routing callback
3655             }
3656         }
3657     }
3658 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    // True while the track itself (rather than an app listener) keeps the native device
    // callback enabled; see testEnableNativeRoutingCallbacksLocked().
    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;
3673 
3674    /**
3675     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3676     * changes on this AudioTrack.
3677     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3678     * notifications of rerouting events.
3679     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3680     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3681     * {@link Looper} will be used.
3682     */
3683     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3684     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3685             Handler handler) {
3686         synchronized (mRoutingChangeListeners) {
3687             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3688                 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
3689                 mRoutingChangeListeners.put(
3690                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3691                                 handler != null ? handler : new Handler(mInitializationLooper)));
3692             }
3693         }
3694     }
3695 
3696     /**
3697      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3698      * to receive rerouting notifications.
3699      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3700      * to remove.
3701      */
3702     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3703     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3704         synchronized (mRoutingChangeListeners) {
3705             if (mRoutingChangeListeners.containsKey(listener)) {
3706                 mRoutingChangeListeners.remove(listener);
3707             }
3708             testDisableNativeRoutingCallbacksLocked();
3709         }
3710     }
3711 
3712     //--------------------------------------------------------------------------
3713     // (Re)Routing Info
3714     //--------------------
3715     /**
3716      * Defines the interface by which applications can receive notifications of
3717      * routing changes for the associated {@link AudioTrack}.
3718      *
3719      * @deprecated users should switch to the general purpose
3720      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3721      */
3722     @Deprecated
3723     public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
3724         /**
3725          * Called when the routing of an AudioTrack changes from either and
3726          * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
3727          * retrieve the newly routed-to device.
3728          */
onRoutingChanged(AudioTrack audioTrack)3729         public void onRoutingChanged(AudioTrack audioTrack);
3730 
3731         @Override
onRoutingChanged(AudioRouting router)3732         default public void onRoutingChanged(AudioRouting router) {
3733             if (router instanceof AudioTrack) {
3734                 onRoutingChanged((AudioTrack) router);
3735             }
3736         }
3737     }
3738 
    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // The deprecated listener type extends the AudioRouting listener, so simply
        // forward to the general-purpose registration method.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
3755 
    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Forward to the general-purpose removal method; the deprecated listener
        // type is a subtype of the AudioRouting listener.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
3767 
3768     /**
3769      * Sends device list change notification to all listeners.
3770      */
broadcastRoutingChange()3771     private void broadcastRoutingChange() {
3772         AudioManager.resetAudioPortGeneration();
3773         baseUpdateDeviceId(getRoutedDevice());
3774         synchronized (mRoutingChangeListeners) {
3775             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
3776                 delegate.notifyClient();
3777             }
3778         }
3779     }
3780 
3781     //--------------------------------------------------------------------------
3782     // Codec notifications
3783     //--------------------
3784 
3785     // OnCodecFormatChangedListener notifications uses an instance
3786     // of ListenerList to manage its listeners.
3787 
3788     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
3789             new Utils.ListenerList();
3790 
    /**
     * Interface definition for a listener for codec format changes.
     * Register via {@link #addOnCodecFormatChangedListener}.
     */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         *     May be {@code null}.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }
3807 
3808     /**
3809      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
3810      * codec format change events on this {@code AudioTrack}.
3811      *
3812      * @param executor  Specifies the {@link Executor} object to control execution.
3813      *
3814      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
3815      *     notifications of codec events.
3816      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)3817     public void addOnCodecFormatChangedListener(
3818             @NonNull @CallbackExecutor Executor executor,
3819             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
3820         mCodecFormatChangedListeners.add(
3821                 listener, /* key for removal */
3822                 executor,
3823                 (int eventCode, AudioMetadataReadMap readMap) -> {
3824                     // eventCode is unused by this implementation.
3825                     listener.onCodecFormatChanged(this, readMap);
3826                 }
3827         );
3828     }
3829 
    /**
     * Removes an {@link OnCodecFormatChangedListener} which has been previously added
     * to receive codec format change events.
     *
     * @param listener The previously added {@link OnCodecFormatChangedListener} interface
     * to remove.
     */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }
3841 
    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the {@link AudioTrack} whose marker was reached.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the {@link AudioTrack} whose position period elapsed.
         */
        void onPeriodicNotification(AudioTrack track);
    }
3862 
    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     * <p>All methods have empty default implementations; override only those of interest.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
3892 
3893     /**
3894      * Registers a callback for the notification of stream events.
3895      * This callback can only be registered for instances operating in offloaded mode
3896      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
3897      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
3898      * more details).
3899      * @param executor {@link Executor} to handle the callbacks.
3900      * @param eventCallback the callback to receive the stream event notifications.
3901      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)3902     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
3903             @NonNull StreamEventCallback eventCallback) {
3904         if (eventCallback == null) {
3905             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3906         }
3907         if (!mOffloaded) {
3908             throw new IllegalStateException(
3909                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
3910         }
3911         if (executor == null) {
3912             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
3913         }
3914         synchronized (mStreamEventCbLock) {
3915             // check if eventCallback already in list
3916             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3917                 if (seci.mStreamEventCb == eventCallback) {
3918                     throw new IllegalArgumentException(
3919                             "StreamEventCallback already registered");
3920                 }
3921             }
3922             beginStreamEventHandling();
3923             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
3924         }
3925     }
3926 
3927     /**
3928      * Unregisters the callback for notification of stream events, previously registered
3929      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
3930      * @param eventCallback the callback to unregister.
3931      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)3932     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
3933         if (eventCallback == null) {
3934             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3935         }
3936         if (!mOffloaded) {
3937             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
3938         }
3939         synchronized (mStreamEventCbLock) {
3940             StreamEventCbInfo seciToRemove = null;
3941             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3942                 if (seci.mStreamEventCb == eventCallback) {
3943                     // ok to remove while iterating over list as we exit iteration
3944                     mStreamEventCbInfoList.remove(seci);
3945                     if (mStreamEventCbInfoList.size() == 0) {
3946                         endStreamEventHandling();
3947                     }
3948                     return;
3949                 }
3950             }
3951             throw new IllegalArgumentException("StreamEventCallback was not registered");
3952         }
3953     }
3954 
    //---------------------------------------------------------
    // Offload
    //--------------------
    // Immutable pairing of a StreamEventCallback with the Executor it runs on.
    private static class StreamEventCbInfo {
        final Executor mStreamEventExec;
        final StreamEventCallback mStreamEventCb;

        StreamEventCbInfo(Executor e, StreamEventCallback cb) {
            mStreamEventExec = e;
            mStreamEventCb = cb;
        }
    }
3967 
    // Guards the callback list and the begin/end of stream event handling.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // volatile: read from the native callback thread without holding mStreamEventCbLock.
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
3977 
3978     /**
3979      * Called from native AudioTrack callback thread, filter messages if necessary
3980      * and repost event on AudioTrack message loop to prevent blocking native thread.
3981      * @param what event code received from native
3982      * @param arg optional argument for event
3983      */
handleStreamEventFromNative(int what, int arg)3984     void handleStreamEventFromNative(int what, int arg) {
3985         if (mStreamEventHandler == null) {
3986             return;
3987         }
3988         switch (what) {
3989             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
3990                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
3991                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
3992                 mStreamEventHandler.sendMessage(
3993                         mStreamEventHandler.obtainMessage(
3994                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
3995                 break;
3996             case NATIVE_EVENT_NEW_IAUDIOTRACK:
3997                 mStreamEventHandler.sendMessage(
3998                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
3999                 break;
4000             case NATIVE_EVENT_STREAM_END:
4001                 mStreamEventHandler.sendMessage(
4002                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
4003                 break;
4004         }
4005     }
4006 
    // Handler running on the dedicated stream-event thread; fans native stream
    // events out to the registered StreamEventCallbacks.
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                // Stream end may complete a pending stop(): either restart playback
                // (another stop/EOS is pending) or finish transitioning to STOPPED.
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Copy the list so callbacks are dispatched without holding the lock
                // (a callback may register/unregister, which takes the same lock).
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Clear the binder calling identity so callbacks execute with this
            // process's own identity rather than a remote caller's.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's no longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
4063 
4064     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()4065     private void beginStreamEventHandling() {
4066         if (mStreamEventHandlerThread == null) {
4067             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
4068             mStreamEventHandlerThread.start();
4069             final Looper looper = mStreamEventHandlerThread.getLooper();
4070             if (looper != null) {
4071                 mStreamEventHandler = new StreamEventHandler(looper);
4072             }
4073         }
4074     }
4075 
4076     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()4077     private void endStreamEventHandling() {
4078         if (mStreamEventHandlerThread != null) {
4079             mStreamEventHandlerThread.quit();
4080             mStreamEventHandlerThread = null;
4081         }
4082     }
4083 
4084     /**
4085      * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
4086      *
4087      * @param logSessionId a {@link LogSessionId} instance which is used to
4088      *        identify this object to the metrics service. Proper generated
4089      *        Ids must be obtained from the Java metrics service and should
4090      *        be considered opaque. Use
4091      *        {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
4092      *        logSessionId association.
4093      * @throws IllegalStateException if AudioTrack not initialized.
4094      *
4095      */
setLogSessionId(@onNull LogSessionId logSessionId)4096     public void setLogSessionId(@NonNull LogSessionId logSessionId) {
4097         Objects.requireNonNull(logSessionId);
4098         if (mState == STATE_UNINITIALIZED) {
4099             throw new IllegalStateException("track not initialized");
4100         }
4101         String stringId = logSessionId.getStringId();
4102         native_setLogSessionId(stringId);
4103         mLogSessionId = logSessionId;
4104     }
4105 
    /**
     * Returns the {@link LogSessionId}.
     * This is the value most recently set via {@link #setLogSessionId(LogSessionId)}.
     */
    @NonNull
    public LogSessionId getLogSessionId() {
        return mLogSessionId;
    }
4113 
4114     //---------------------------------------------------------
4115     // Inner classes
4116     //--------------------
4117     /**
4118      * Helper class to handle the forwarding of native events to the appropriate listener
4119      * (potentially) handled in a different thread
4120      */
4121     private class NativePositionEventHandlerDelegate {
4122         private final Handler mHandler;
4123 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)4124         NativePositionEventHandlerDelegate(final AudioTrack track,
4125                                    final OnPlaybackPositionUpdateListener listener,
4126                                    Handler handler) {
4127             // find the looper for our new event handler
4128             Looper looper;
4129             if (handler != null) {
4130                 looper = handler.getLooper();
4131             } else {
4132                 // no given handler, use the looper the AudioTrack was created in
4133                 looper = mInitializationLooper;
4134             }
4135 
4136             // construct the event handler with this looper
4137             if (looper != null) {
4138                 // implement the event handler delegate
4139                 mHandler = new Handler(looper) {
4140                     @Override
4141                     public void handleMessage(Message msg) {
4142                         if (track == null) {
4143                             return;
4144                         }
4145                         switch(msg.what) {
4146                         case NATIVE_EVENT_MARKER:
4147                             if (listener != null) {
4148                                 listener.onMarkerReached(track);
4149                             }
4150                             break;
4151                         case NATIVE_EVENT_NEW_POS:
4152                             if (listener != null) {
4153                                 listener.onPeriodicNotification(track);
4154                             }
4155                             break;
4156                         default:
4157                             loge("Unknown native event type: " + msg.what);
4158                             break;
4159                         }
4160                     }
4161                 };
4162             } else {
4163                 mHandler = null;
4164             }
4165         }
4166 
getHandler()4167         Handler getHandler() {
4168             return mHandler;
4169         }
4170     }
4171 
    //---------------------------------------------------------
    // Methods for IPlayer interface
    //--------------------
    @Override
    void playerStart() {
        // IPlayer start request delegates to the public play() API.
        play();
    }
4179 
    @Override
    void playerPause() {
        // IPlayer pause request delegates to the public pause() API.
        pause();
    }
4184 
    @Override
    void playerStop() {
        // IPlayer stop request delegates to the public stop() API.
        stop();
    }
4189 
4190     //---------------------------------------------------------
4191     // Java methods called from the native side
4192     //--------------------
4193     @SuppressWarnings("unused")
4194     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)4195     private static void postEventFromNative(Object audiotrack_ref,
4196             int what, int arg1, int arg2, Object obj) {
4197         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
4198         final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
4199         if (track == null) {
4200             return;
4201         }
4202 
4203         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
4204             track.broadcastRoutingChange();
4205             return;
4206         }
4207 
4208         if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
4209             ByteBuffer buffer = (ByteBuffer) obj;
4210             buffer.order(ByteOrder.nativeOrder());
4211             buffer.rewind();
4212             AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
4213             if (audioMetaData == null) {
4214                 Log.e(TAG, "Unable to get audio metadata from byte buffer");
4215                 return;
4216             }
4217             track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
4218             return;
4219         }
4220 
4221         if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
4222                 || what == NATIVE_EVENT_NEW_IAUDIOTRACK
4223                 || what == NATIVE_EVENT_STREAM_END) {
4224             track.handleStreamEventFromNative(what, arg1);
4225             return;
4226         }
4227 
4228         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
4229         if (delegate != null) {
4230             Handler handler = delegate.getHandler();
4231             if (handler != null) {
4232                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
4233                 handler.sendMessage(m);
4234             }
4235         }
4236     }
4237 
    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload, int encapsulationMode, Object tunerConfiguration,
            @NonNull String opPackageName);

    private native final void native_finalize();

    /**
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data writes; return the number of units written or a negative error code.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate / parameters.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position markers and periodic notifications.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effects.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Routing / device callbacks.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();

    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    // Audio description mix and dual mono controls.
    private native int native_set_audio_description_mix_level_db(float level);
    private native int native_get_audio_description_mix_level_db(float[] level);
    private native int native_set_dual_mono_mode(int dualMonoMode);
    private native int native_get_dual_mono_mode(int[] dualMonoMode);
    private native void native_setLogSessionId(@Nullable String logSessionId);
    private native int native_setStartThresholdInFrames(int startThresholdInFrames);
native_getStartThresholdInFrames()4351     private native int native_getStartThresholdInFrames();
4352 
    /**
     * Sets the audio service Player Interface Id.
     *
     * The playerIId does not change over the lifetime of the client
     * Java AudioTrack and is set automatically on creation.
     *
     * This call informs the native AudioTrack for metrics logging purposes.
     *
     * @param id the value reported by AudioManager when registering the track.
     *           A value of -1 indicates invalid - the playerIId was never set.
     * @throws IllegalStateException if AudioTrack not initialized.
     */
    private native void native_setPlayerIId(int playerIId);
4366 
4367     //---------------------------------------------------------
4368     // Utility methods
4369     //------------------
4370 
    /** Logs a debug-level message under this class's log tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
4374 
    /** Logs an error-level message under this class's log tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
4378 
4379     public final static class MetricsConstants
4380     {
MetricsConstants()4381         private MetricsConstants() {}
4382 
4383         // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
4384         private static final String MM_PREFIX = "android.media.audiotrack.";
4385 
4386         /**
4387          * Key to extract the stream type for this track
4388          * from the {@link AudioTrack#getMetrics} return value.
4389          * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
4390          * The value is a {@code String}.
4391          */
4392         public static final String STREAMTYPE = MM_PREFIX + "streamtype";
4393 
4394         /**
4395          * Key to extract the attribute content type for this track
4396          * from the {@link AudioTrack#getMetrics} return value.
4397          * The value is a {@code String}.
4398          */
4399         public static final String CONTENTTYPE = MM_PREFIX + "type";
4400 
4401         /**
4402          * Key to extract the attribute usage for this track
4403          * from the {@link AudioTrack#getMetrics} return value.
4404          * The value is a {@code String}.
4405          */
4406         public static final String USAGE = MM_PREFIX + "usage";
4407 
4408         /**
4409          * Key to extract the sample rate for this track in Hz
4410          * from the {@link AudioTrack#getMetrics} return value.
4411          * The value is an {@code int}.
4412          * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
4413          */
4414         @Deprecated
4415         public static final String SAMPLERATE = "android.media.audiorecord.samplerate";
4416 
4417         /**
4418          * Key to extract the native channel mask information for this track
4419          * from the {@link AudioTrack#getMetrics} return value.
4420          *
4421          * The value is a {@code long}.
4422          * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
4423          * the returned format instead.
4424          */
4425         @Deprecated
4426         public static final String CHANNELMASK = "android.media.audiorecord.channelmask";
4427 
4428         /**
4429          * Use for testing only. Do not expose.
4430          * The current sample rate.
4431          * The value is an {@code int}.
4432          * @hide
4433          */
4434         @TestApi
4435         public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";
4436 
4437         /**
4438          * Use for testing only. Do not expose.
4439          * The native channel mask.
4440          * The value is a {@code long}.
4441          * @hide
4442          */
4443         @TestApi
4444         public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";
4445 
4446         /**
4447          * Use for testing only. Do not expose.
4448          * The output audio data encoding.
4449          * The value is a {@code String}.
4450          * @hide
4451          */
4452         @TestApi
4453         public static final String ENCODING = MM_PREFIX + "encoding";
4454 
4455         /**
4456          * Use for testing only. Do not expose.
4457          * The port id of this track port in audioserver.
4458          * The value is an {@code int}.
4459          * @hide
4460          */
4461         @TestApi
4462         public static final String PORT_ID = MM_PREFIX + "portId";
4463 
4464         /**
4465          * Use for testing only. Do not expose.
4466          * The buffer frameCount.
4467          * The value is an {@code int}.
4468          * @hide
4469          */
4470         @TestApi
4471         public static final String FRAME_COUNT = MM_PREFIX + "frameCount";
4472 
4473         /**
4474          * Use for testing only. Do not expose.
4475          * The actual track attributes used.
4476          * The value is a {@code String}.
4477          * @hide
4478          */
4479         @TestApi
4480         public static final String ATTRIBUTES = MM_PREFIX + "attributes";
4481     }
4482 }
4483