• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import static android.media.AudioManager.AUDIO_SESSION_ID_GENERATE;
20 import static android.media.audio.Flags.FLAG_ROUTED_DEVICE_IDS;
21 
22 import android.annotation.CallbackExecutor;
23 import android.annotation.FlaggedApi;
24 import android.annotation.FloatRange;
25 import android.annotation.IntDef;
26 import android.annotation.IntRange;
27 import android.annotation.NonNull;
28 import android.annotation.Nullable;
29 import android.annotation.RequiresPermission;
30 import android.annotation.SystemApi;
31 import android.annotation.TestApi;
32 import android.compat.annotation.UnsupportedAppUsage;
33 import android.content.AttributionSource;
34 import android.content.AttributionSource.ScopedParcelState;
35 import android.content.Context;
36 import android.media.audiopolicy.AudioMix;
37 import android.media.audiopolicy.AudioMixingRule;
38 import android.media.audiopolicy.AudioPolicy;
39 import android.media.metrics.LogSessionId;
40 import android.os.Binder;
41 import android.os.Build;
42 import android.os.Handler;
43 import android.os.HandlerThread;
44 import android.os.Looper;
45 import android.os.Message;
46 import android.os.Parcel;
47 import android.os.PersistableBundle;
48 import android.util.ArrayMap;
49 import android.util.Log;
50 
51 import com.android.internal.annotations.GuardedBy;
52 
53 import java.lang.annotation.Retention;
54 import java.lang.annotation.RetentionPolicy;
55 import java.lang.ref.WeakReference;
56 import java.nio.ByteBuffer;
57 import java.nio.ByteOrder;
58 import java.nio.NioUtils;
59 import java.util.ArrayList;
60 import java.util.LinkedList;
61 import java.util.List;
62 import java.util.Map;
63 import java.util.Objects;
64 import java.util.concurrent.Executor;
65 
66 /**
67  * The AudioTrack class manages and plays a single audio resource for Java applications.
68  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
69  * achieved by "pushing" the data to the AudioTrack object using one of the
70  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
71  *  and {@link #write(float[], int, int, int)} methods.
72  *
73  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In streaming mode, the application writes a continuous stream of data to the AudioTrack, using
75  * one of the {@code write()} methods. These are blocking and return when the data has been
76  * transferred from the Java layer to the native layer and queued for playback. The streaming
77  * mode is most useful when playing blocks of audio data that for instance are:
78  *
79  * <ul>
80  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
81  *   <li>too big to fit in memory because of the characteristics of the audio data
82  *         (high sampling rate, bits per sample ...)</li>
83  *   <li>received or generated while previously queued audio is playing.</li>
84  * </ul>
85  *
86  * The static mode should be chosen when dealing with short sounds that fit in memory and
87  * that need to be played with the smallest latency possible. The static mode will
88  * therefore be preferred for UI and game sounds that are played often, and with the
89  * smallest overhead possible.
90  *
91  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
92  * The size of this buffer, specified during the construction, determines how long an AudioTrack
93  * can play before running out of data.<br>
94  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
95  * be played from it.<br>
96  * For the streaming mode, data will be written to the audio sink in chunks of
97  * sizes less than or equal to the total buffer size.
98  *
99  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
100  */
101 public class AudioTrack extends PlayerBase
102                         implements AudioRouting
103                                  , VolumeAutomation
104 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    // The three public play states mirror the OpenSL ES SL_PLAYSTATE_* values,
    // per the trailing comments below; the two @hide states are internal
    // offload-only extensions and must not collide with the public ones.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}
157 
    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // The public return codes below alias AudioSystem values so that Java and
    // native layers report the same numeric codes.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    // Note: To avoid collisions with other event constants,
    // do not define an event here that is the same value as
    // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.
    // (The values below are intentionally not contiguous/ordered; they must
    // match the native enum, so do not renumber them.)

    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;
    /**
     * Event id denotes when the codec format changes.
     *
     * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
     * this event comes from the AudioFlinger Thread / Output Stream management
     * (not from buffer indications as above).
     */
    private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;

    /** Log tag used by this class. */
    private final static String TAG = "android.media.AudioTrack";
246 
    /** @hide */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM format is allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /**
     * Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     * @hide
     */
    @IntDef(prefix = {"ENCAPSULATION_METADATA_TYPE_"},
            value =
                    {
                            ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
                            ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
                            ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
                            ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT,
                    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;

    /**
     * Encapsulation metadata type for placement of supplementary audio.
     *
     * A 32 bit integer constant, one of {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL}, {@link
     * #SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT}, {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT}.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT = 3;

    /**
     * Enumeration of supplementary audio placement types.
     * @hide
     */
    @IntDef(prefix = {"SUPPLEMENTARY_AUDIO_PLACEMENT_"},
            value =
                    {
                            SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL,
                            SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT,
                            SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT,
                    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface SupplementaryAudioPlacement {}
    // Important: The SUPPLEMENTARY_AUDIO_PLACEMENT values must be kept in sync with native header
    // files.

    /**
     * Supplementary audio placement normal.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL = 0;

    /**
     * Supplementary audio placement left.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT = 1;

    /**
     * Supplementary audio placement right.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT = 2;
353 
    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     *
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;
418 
    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
508 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID
     */
    private int mSessionId = AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    /**
     * The log session id used for metrics.
     * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set.
     */
    @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE;

    // NOTE(review): assigned elsewhere in this class; lifecycle not visible in this chunk.
    private AudioPolicy mAudioPolicy;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private long mJniData;
652 
653 
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, asking the framework to
        // allocate a fresh audio session id for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AUDIO_SESSION_ID_GENERATE);
    }
703 
704     /**
705      * Class constructor with audio session. Use this constructor when the AudioTrack must be
706      * attached to a particular audio session. The primary use of the audio session ID is to
707      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
708      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
709      * and media players in the same session and not to the output mix.
710      * When an AudioTrack is created without specifying a session, it will create its own session
711      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
712      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
713      * session
714      * with all other media players or audio tracks in the same session, otherwise a new session
715      * will be created for this track if none is supplied.
716      * @param streamType the type of the audio stream. See
717      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
718      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
719      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
720      * @param sampleRateInHz the initial source sample rate expressed in Hz.
721      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
722      *   which is usually the sample rate of the sink.
723      * @param channelConfig describes the configuration of the audio channels.
724      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
725      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
726      * @param audioFormat the format in which the audio data is represented.
727      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
728      *   {@link AudioFormat#ENCODING_PCM_8BIT},
729      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
730      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
731      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
732      *   <p> If the track's creation mode is {@link #MODE_STATIC},
733      *   this is the maximum length sample, or audio clip, that can be played by this instance.
734      *   <p> If the track's creation mode is {@link #MODE_STREAM},
735      *   this should be the desired buffer size
736      *   for the <code>AudioTrack</code> to satisfy the application's
737      *   latency requirements.
738      *   If <code>bufferSizeInBytes</code> is less than the
739      *   minimum buffer size for the output sink, it is increased to the minimum
740      *   buffer size.
741      *   The method {@link #getBufferSizeInFrames()} returns the
742      *   actual size in frames of the buffer created, which
743      *   determines the minimum frequency to write
744      *   to the streaming <code>AudioTrack</code> to avoid underrun.
745      *   You can write data into this buffer in smaller chunks than this size.
746      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
747      *   for an AudioTrack instance in streaming mode.
748      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
749      * @param sessionId Id of audio session the AudioTrack must be attached to
750      * @throws java.lang.IllegalArgumentException
751      * @deprecated use {@link Builder} or
752      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
753      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
754      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)755     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
756             int bufferSizeInBytes, int mode, int sessionId)
757     throws IllegalArgumentException {
758         // mState already == STATE_UNINITIALIZED
759         this((new AudioAttributes.Builder())
760                     .setLegacyStreamType(streamType)
761                     .build(),
762                 (new AudioFormat.Builder())
763                     .setChannelMask(channelConfig)
764                     .setEncoding(audioFormat)
765                     .setSampleRate(sampleRateInHz)
766                     .build(),
767                 bufferSizeInBytes,
768                 mode, sessionId);
769         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
770     }
771 
772     /**
773      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
774      * @param attributes a non-null {@link AudioAttributes} instance.
775      * @param format a non-null {@link AudioFormat} instance describing the format of the data
776      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
777      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
778      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
779      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
780      *   <p> If the track's creation mode is {@link #MODE_STATIC},
781      *   this is the maximum length sample, or audio clip, that can be played by this instance.
782      *   <p> If the track's creation mode is {@link #MODE_STREAM},
783      *   this should be the desired buffer size
784      *   for the <code>AudioTrack</code> to satisfy the application's
785      *   latency requirements.
786      *   If <code>bufferSizeInBytes</code> is less than the
787      *   minimum buffer size for the output sink, it is increased to the minimum
788      *   buffer size.
789      *   The method {@link #getBufferSizeInFrames()} returns the
790      *   actual size in frames of the buffer created, which
791      *   determines the minimum frequency to write
792      *   to the streaming <code>AudioTrack</code> to avoid underrun.
793      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
794      *   for an AudioTrack instance in streaming mode.
795      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
796      * @param sessionId ID of audio session the AudioTrack must be attached to, or
797      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
798      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
799      *   construction.
800      * @throws IllegalArgumentException
801      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)802     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
803             int mode, int sessionId)
804                     throws IllegalArgumentException {
805         this(null /* context */, attributes, format, bufferSizeInBytes, mode, sessionId,
806                 false /*offload*/, ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
807     }
808 
AudioTrack(@ullable Context context, AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode, @Nullable TunerConfiguration tunerConfiguration)809     private AudioTrack(@Nullable Context context, AudioAttributes attributes, AudioFormat format,
810             int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode,
811             @Nullable TunerConfiguration tunerConfiguration)
812                     throws IllegalArgumentException {
813         super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
814         // mState already == STATE_UNINITIALIZED
815 
816         mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
817 
818         if (format == null) {
819             throw new IllegalArgumentException("Illegal null AudioFormat");
820         }
821 
822         // Check if we should enable deep buffer mode
823         if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
824             mAttributes = new AudioAttributes.Builder(mAttributes)
825                 .replaceFlags((mAttributes.getAllFlags()
826                         | AudioAttributes.FLAG_DEEP_BUFFER)
827                         & ~AudioAttributes.FLAG_LOW_LATENCY)
828                 .build();
829         }
830 
831         // remember which looper is associated with the AudioTrack instantiation
832         Looper looper;
833         if ((looper = Looper.myLooper()) == null) {
834             looper = Looper.getMainLooper();
835         }
836 
837         int rate = format.getSampleRate();
838         if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
839             rate = 0;
840         }
841 
842         int channelIndexMask = 0;
843         if ((format.getPropertySetMask()
844                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
845             channelIndexMask = format.getChannelIndexMask();
846         }
847         int channelMask = 0;
848         if ((format.getPropertySetMask()
849                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
850             channelMask = format.getChannelMask();
851         } else if (channelIndexMask == 0) { // if no masks at all, use stereo
852             channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
853                     | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
854         }
855         int encoding = AudioFormat.ENCODING_DEFAULT;
856         if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
857             encoding = format.getEncoding();
858         }
859         audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
860         mOffloaded = offload;
861         mStreamType = AudioSystem.STREAM_DEFAULT;
862 
863         audioBuffSizeCheck(bufferSizeInBytes);
864 
865         mInitializationLooper = looper;
866 
867         if (sessionId < 0) {
868             throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
869         }
870 
871         int[] sampleRate = new int[] {mSampleRate};
872         int[] session = new int[1];
873         session[0] = resolvePlaybackSessionId(context, sessionId);
874 
875         AttributionSource attributionSource = context == null
876                 ? AttributionSource.myAttributionSource() : context.getAttributionSource();
877 
878         // native initialization
879         try (ScopedParcelState attributionSourceState = attributionSource.asScopedParcelState()) {
880             int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
881                     sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
882                     mNativeBufferSizeInBytes, mDataLoadMode, session,
883                     attributionSourceState.getParcel(), 0 /*nativeTrackInJavaObj*/, offload,
884                     encapsulationMode, tunerConfiguration, getCurrentOpPackageName());
885             if (initResult != SUCCESS) {
886                 loge("Error code " + initResult + " when initializing AudioTrack.");
887                 return; // with mState == STATE_UNINITIALIZED
888             }
889         }
890 
891         mSampleRate = sampleRate[0];
892         mSessionId = session[0];
893 
894         // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.
895 
896         if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
897             int frameSizeInBytes;
898             if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
899                 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
900             } else {
901                 frameSizeInBytes = 1;
902             }
903             mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
904         }
905 
906         if (mDataLoadMode == MODE_STATIC) {
907             mState = STATE_NO_STATIC_DATA;
908         } else {
909             mState = STATE_INITIALIZED;
910         }
911 
912         baseRegisterPlayer(mSessionId);
913         native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
914     }
915 
916     /**
917      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
918      * the AudioTrackRoutingProxy subclass.
919      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
920      * (associated with an OpenSL ES player).
921      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
922      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
923      * it means that the OpenSL player interface hasn't been realized, so there is no native
924      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
925      * OpenSLES interface is realized.
926      */
AudioTrack(long nativeTrackInJavaObj)927     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
928         super(new AudioAttributes.Builder().build(),
929                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
930         // "final"s
931         mNativeTrackInJavaObj = 0;
932         mJniData = 0;
933 
934         // remember which looper is associated with the AudioTrack instantiation
935         Looper looper;
936         if ((looper = Looper.myLooper()) == null) {
937             looper = Looper.getMainLooper();
938         }
939         mInitializationLooper = looper;
940 
941         // other initialization...
942         if (nativeTrackInJavaObj != 0) {
943             baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE);
944             deferred_connect(nativeTrackInJavaObj);
945         } else {
946             mState = STATE_UNINITIALIZED;
947         }
948     }
949 
    /**
     * Connects this Java AudioTrack to an already created and initialized native AudioTrack
     * (see {@link #AudioTrack(long)}). Does nothing if this instance is already initialized.
     *
     * @param nativeTrackInJavaObj C/C++ pointer to the existing native AudioTrack.
     * @hide
     */
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
        if (mState != STATE_INITIALIZED) {
            // Note that for this native_setup, we are providing an already created/initialized
            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
            int[] session = { 0 };
            int[] rates = { 0 };
            try (ScopedParcelState attributionSourceState =
                         AttributionSource.myAttributionSource().asScopedParcelState()) {
                int initResult = native_setup(new WeakReference<AudioTrack>(this),
                        null /*mAttributes - NA*/,
                        rates /*sampleRate - NA*/,
                        0 /*mChannelMask - NA*/,
                        0 /*mChannelIndexMask - NA*/,
                        0 /*mAudioFormat - NA*/,
                        0 /*mNativeBufferSizeInBytes - NA*/,
                        0 /*mDataLoadMode - NA*/,
                        session,
                        attributionSourceState.getParcel(),
                        nativeTrackInJavaObj,
                        false /*offload*/,
                        ENCAPSULATION_MODE_NONE,
                        null /* tunerConfiguration */,
                        "" /* opPackagename */);
                if (initResult != SUCCESS) {
                    loge("Error code " + initResult + " when initializing AudioTrack.");
                    return; // with mState == STATE_UNINITIALIZED
                }
            }

            // Adopt the session id of the pre-existing native track.
            mSessionId = session[0];

            mState = STATE_INITIALIZED;
        }
    }
988 
989     /**
990      * TunerConfiguration is used to convey tuner information
991      * from the android.media.tv.Tuner API to AudioTrack construction.
992      *
993      * Use the Builder to construct the TunerConfiguration object,
994      * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack.
995      * @hide
996      */
997     @SystemApi
998     public static class TunerConfiguration {
999         private final int mContentId;
1000         private final int mSyncId;
1001 
1002         /**
1003          * A special content id for {@link #TunerConfiguration(int, int)}
1004          * indicating audio is delivered
1005          * from an {@code AudioTrack} write, not tunneled from the tuner stack.
1006          */
1007         public static final int CONTENT_ID_NONE = 0;
1008 
1009         /**
1010          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
1011          *
1012          * @param contentId selects the audio stream to use.
1013          *     The contentId may be obtained from
1014          *     {@link android.media.tv.tuner.filter.Filter#getId()},
1015          *     such obtained id is always a positive number.
1016          *     If audio is to be delivered through an {@code AudioTrack} write
1017          *     then {@code CONTENT_ID_NONE} may be used.
1018          * @param syncId selects the clock to use for synchronization
1019          *     of audio with other streams such as video.
1020          *     The syncId may be obtained from
1021          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
1022          *     This is always a positive number.
1023          */
1024         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)1025         public TunerConfiguration(
1026                 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) {
1027             if (contentId < 0) {
1028                 throw new IllegalArgumentException(
1029                         "contentId " + contentId + " must be positive or CONTENT_ID_NONE");
1030             }
1031             if (syncId < 1) {
1032                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
1033             }
1034             mContentId = contentId;
1035             mSyncId = syncId;
1036         }
1037 
1038         /**
1039          * Returns the contentId.
1040          */
1041         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()1042         public @IntRange(from = 1) int getContentId() {
1043             return mContentId; // The Builder ensures this is > 0.
1044         }
1045 
1046         /**
1047          * Returns the syncId.
1048          */
1049         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()1050         public @IntRange(from = 1) int getSyncId() {
1051             return mSyncId;  // The Builder ensures this is > 0.
1052         }
1053     }
1054 
1055     /**
1056      * Builder class for {@link AudioTrack} objects.
1057      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
1058      * attributes and audio format parameters, you indicate which of those vary from the default
1059      * behavior on the device.
1060      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
1061      * parameters, to be used by a new <code>AudioTrack</code> instance:
1062      *
1063      * <pre class="prettyprint">
1064      * AudioTrack player = new AudioTrack.Builder()
1065      *         .setAudioAttributes(new AudioAttributes.Builder()
1066      *                  .setUsage(AudioAttributes.USAGE_ALARM)
1067      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
1068      *                  .build())
1069      *         .setAudioFormat(new AudioFormat.Builder()
1070      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
1071      *                 .setSampleRate(44100)
1072      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1073      *                 .build())
1074      *         .setBufferSizeInBytes(minBuffSize)
1075      *         .build();
1076      * </pre>
1077      * <p>
1078      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
1079      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
1080      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
1081      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
1082      * {@link AudioFormat#ENCODING_PCM_16BIT}.
1083      * The sample rate will depend on the device actually selected for playback and can be queried
1084      * with {@link #getSampleRate()} method.
1085      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
1086      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
1087      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1088      * <code>MODE_STREAM</code> will be used.
1089      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1090      * be generated.
1091      * <br>Offload is false by default.
1092      */
1093     public static class Builder {
        private Context mContext;  // optional; source of AttributionSource / device session ids
        private AudioAttributes mAttributes;  // null until setAudioAttributes()
        private AudioFormat mFormat;  // null until setAudioFormat()
        private int mBufferSizeInBytes;  // 0 until setBufferSizeInBytes()
        private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
        private int mSessionId = AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;  // default transfer mode
        private int mPerformanceMode = PERFORMANCE_MODE_NONE;
        private boolean mOffload = false;
        private TunerConfiguration mTunerConfiguration;  // optional, SystemApi only
        private int mCallRedirectionMode = AudioManager.CALL_REDIRECT_NONE;
1105 
        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
            // Nothing to do: fields are initialized to the defaults documented on the class.
        }
1111 
1112         /**
1113          * Sets the context the track belongs to. This context will be used to pull information,
1114          * such as {@link android.content.AttributionSource} and device specific audio session ids,
1115          * which will be associated with the {@link AudioTrack}. However, the context itself will
1116          * not be retained by the {@link AudioTrack}.
1117          * @param context a non-null {@link Context} instance
1118          * @return the same Builder instance.
1119          */
setContext(@onNull Context context)1120         public @NonNull Builder setContext(@NonNull Context context) {
1121             mContext = Objects.requireNonNull(context);
1122             return this;
1123         }
1124 
1125         /**
1126          * Sets the {@link AudioAttributes}.
1127          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1128          *     data to be played.
1129          * @return the same Builder instance.
1130          * @throws IllegalArgumentException
1131          */
setAudioAttributes(@onNull AudioAttributes attributes)1132         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1133                 throws IllegalArgumentException {
1134             if (attributes == null) {
1135                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1136             }
1137             // keep reference, we only copy the data when building
1138             mAttributes = attributes;
1139             return this;
1140         }
1141 
1142         /**
1143          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1144          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1145          * as encoding, channel mask and sample rate.
1146          * @param format a non-null {@link AudioFormat} instance.
1147          * @return the same Builder instance.
1148          * @throws IllegalArgumentException
1149          */
setAudioFormat(@onNull AudioFormat format)1150         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1151                 throws IllegalArgumentException {
1152             if (format == null) {
1153                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1154             }
1155             // keep reference, we only copy the data when building
1156             mFormat = format;
1157             return this;
1158         }
1159 
1160         /**
1161          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1162          * If using the {@link AudioTrack} in streaming mode
1163          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1164          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1165          * the estimated minimum buffer size for the creation of an AudioTrack instance
1166          * in streaming mode.
1167          * <br>If using the <code>AudioTrack</code> in static mode (see
1168          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1169          * played by this instance.
1170          * @param bufferSizeInBytes
1171          * @return the same Builder instance.
1172          * @throws IllegalArgumentException
1173          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1174         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1175                 throws IllegalArgumentException {
1176             if (bufferSizeInBytes <= 0) {
1177                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1178             }
1179             mBufferSizeInBytes = bufferSizeInBytes;
1180             return this;
1181         }
1182 
1183         /**
1184          * Sets the encapsulation mode.
1185          *
1186          * Encapsulation mode allows metadata to be sent together with
1187          * the audio data payload in a {@code ByteBuffer}.
1188          * This requires a compatible hardware audio codec.
1189          *
1190          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1191          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1192          * @return the same Builder instance.
1193          */
1194         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1195         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1196         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1197             switch (encapsulationMode) {
1198                 case ENCAPSULATION_MODE_NONE:
1199                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1200                 case ENCAPSULATION_MODE_HANDLE:
1201                     mEncapsulationMode = encapsulationMode;
1202                     break;
1203                 default:
1204                     throw new IllegalArgumentException(
1205                             "Invalid encapsulation mode " + encapsulationMode);
1206             }
1207             return this;
1208         }
1209 
1210         /**
1211          * Sets the mode under which buffers of audio data are transferred from the
1212          * {@link AudioTrack} to the framework.
1213          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1214          * @return the same Builder instance.
1215          * @throws IllegalArgumentException
1216          */
setTransferMode(@ransferMode int mode)1217         public @NonNull Builder setTransferMode(@TransferMode int mode)
1218                 throws IllegalArgumentException {
1219             switch(mode) {
1220                 case MODE_STREAM:
1221                 case MODE_STATIC:
1222                     mMode = mode;
1223                     break;
1224                 default:
1225                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1226             }
1227             return this;
1228         }
1229 
1230         /**
1231          * Sets the session ID the {@link AudioTrack} will be attached to.
1232          *
1233          * Note, that if there's a device specific session id asociated with the context, explicitly
1234          * setting a session id using this method will override it
1235          * (see {@link Builder#setContext(Context)}).
1236          * @param sessionId a strictly positive ID number retrieved from another
1237          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1238          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1239          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1240          * @return the same Builder instance.
1241          * @throws IllegalArgumentException
1242          */
setSessionId(@ntRangefrom = 1) int sessionId)1243         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1244                 throws IllegalArgumentException {
1245             if ((sessionId != AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1246                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1247             }
1248             mSessionId = sessionId;
1249             return this;
1250         }
1251 
1252         /**
1253          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1254          * may not be supported by the particular device, and the framework is free
1255          * to ignore such request if it is incompatible with other requests or hardware.
1256          *
1257          * @param performanceMode one of
1258          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1259          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1260          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1261          * @return the same Builder instance.
1262          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1263          */
setPerformanceMode(@erformanceMode int performanceMode)1264         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1265             switch (performanceMode) {
1266                 case PERFORMANCE_MODE_NONE:
1267                 case PERFORMANCE_MODE_LOW_LATENCY:
1268                 case PERFORMANCE_MODE_POWER_SAVING:
1269                     mPerformanceMode = performanceMode;
1270                     break;
1271                 default:
1272                     throw new IllegalArgumentException(
1273                             "Invalid performance mode " + performanceMode);
1274             }
1275             return this;
1276         }
1277 
        /**
         * Sets whether this track will play through the offloaded audio path.
         * When set to true, at build time, the audio format will be checked against
         * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
         * to verify the audio format used by this track is supported on the device's offload
         * path (if any).
         * <br>Offload is only supported for media audio streams, and therefore requires that
         * the usage be {@link AudioAttributes#USAGE_MEDIA}.
         * @param offload true to require the offload path for playback.
         * @return the same Builder instance.
         */
        public @NonNull Builder setOffloadedPlayback(boolean offload) {
            // No validation here; the format/offload combination is checked at build() time.
            mOffload = offload;
            return this;
        }
1293 
        /**
         * Sets the tuner configuration for the {@code AudioTrack}.
         *
         * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
         * the Android TV tuner API which indicate the audio content stream id and the
         * synchronization id for the {@code AudioTrack}.
         *
         * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code tunerConfiguration} is null.
         * @hide
         */
        @SystemApi
        @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
        public @NonNull Builder setTunerConfiguration(
                @NonNull TunerConfiguration tunerConfiguration) {
            if (tunerConfiguration == null) {
                throw new IllegalArgumentException("tunerConfiguration is null");
            }
            mTunerConfiguration = tunerConfiguration;
            return this;
        }
1315 
1316         /**
1317          * @hide
1318          * Sets the {@link AudioTrack} call redirection mode.
1319          * Used when creating an AudioTrack to inject audio to call uplink path. The mode
1320          * indicates if the call is a PSTN call or a VoIP call in which case a dynamic audio
1321          * policy is created to use this track as the source for all capture with voice
1322          * communication preset.
1323          *
1324          * @param callRedirectionMode one of
1325          * {@link AudioManager#CALL_REDIRECT_NONE},
1326          * {@link AudioManager#CALL_REDIRECT_PSTN},
1327          * or {@link AAudioManager#CALL_REDIRECT_VOIP}.
1328          * @return the same Builder instance.
1329          * @throws IllegalArgumentException if {@code callRedirectionMode} is not valid.
1330          */
setCallRedirectionMode( @udioManager.CallRedirectionMode int callRedirectionMode)1331         public @NonNull Builder setCallRedirectionMode(
1332                 @AudioManager.CallRedirectionMode int callRedirectionMode) {
1333             switch (callRedirectionMode) {
1334                 case AudioManager.CALL_REDIRECT_NONE:
1335                 case AudioManager.CALL_REDIRECT_PSTN:
1336                 case AudioManager.CALL_REDIRECT_VOIP:
1337                     mCallRedirectionMode = callRedirectionMode;
1338                     break;
1339                 default:
1340                     throw new IllegalArgumentException(
1341                             "Invalid call redirection mode " + callRedirectionMode);
1342             }
1343             return this;
1344         }
1345 
        /**
         * Builds an {@code AudioTrack} whose output is injected into the call uplink path.
         * A loop-back {@link AudioMix} is registered through a dynamic {@link AudioPolicy},
         * and this track becomes the source feeding all capture done with the
         * {@code VOICE_COMMUNICATION} preset marked for call redirection.
         * @return the injection track; never null.
         * @throws UnsupportedOperationException if the audio policy cannot be registered
         *     or the track source cannot be created.
         */
        private @NonNull AudioTrack buildCallInjectionTrack() {
            // Rule: match capture using the VOICE_COMMUNICATION preset, tagged for call
            // redirection; the mix acts as an injector (this track is the source).
            AudioMixingRule audioMixingRule = new AudioMixingRule.Builder()
                    .addMixRule(AudioMixingRule.RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
                            new AudioAttributes.Builder()
                                   .setCapturePreset(MediaRecorder.AudioSource.VOICE_COMMUNICATION)
                                   .setForCallRedirection()
                                   .build())
                    .setTargetMixRole(AudioMixingRule.MIX_ROLE_INJECTOR)
                    .build();
            AudioMix audioMix = new AudioMix.Builder(audioMixingRule)
                    .setFormat(mFormat)
                    .setRouteFlags(AudioMix.ROUTE_FLAG_LOOP_BACK)
                    .build();
            AudioPolicy audioPolicy =
                    new AudioPolicy.Builder(/*context=*/ mContext).addMix(audioMix).build();

            // The policy must be registered before a track source can be created for the mix.
            if (AudioManager.registerAudioPolicyStatic(audioPolicy) != 0) {
                throw new UnsupportedOperationException("Error: could not register audio policy");
            }
            AudioTrack track = audioPolicy.createAudioTrackSource(audioMix);
            if (track == null) {
                throw new UnsupportedOperationException("Cannot create injection AudioTrack");
            }
            // Tie the policy's lifetime to the track so callers do not have to unregister it.
            track.unregisterAudioPolicyOnRelease(audioPolicy);
            return track;
        }
1372 
1373         /**
1374          * Builds an {@link AudioTrack} instance initialized with all the parameters set
1375          * on this <code>Builder</code>.
1376          * @return a new successfully initialized {@link AudioTrack} instance.
1377          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
1378          *     were incompatible, or if they are not supported by the device,
1379          *     or if the device was not available.
1380          */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            // Default to media usage when the caller supplied no attributes.
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            // Translate the requested performance mode into attribute flags
            // (LOW_LATENCY and DEEP_BUFFER are mutually exclusive).
            switch (mPerformanceMode) {
            case PERFORMANCE_MODE_LOW_LATENCY:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                    .replaceFlags((mAttributes.getAllFlags()
                            | AudioAttributes.FLAG_LOW_LATENCY)
                            & ~AudioAttributes.FLAG_DEEP_BUFFER)
                    .build();
                break;
            case PERFORMANCE_MODE_NONE:
                if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
                    break; // do not enable deep buffer mode.
                }
                // permitted to fall through to enable deep buffer
            case PERFORMANCE_MODE_POWER_SAVING:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
                break;
            }

            // Default format: stereo, default encoding, sample rate left unspecified.
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }

            // VoIP injection takes a completely different construction path (dynamic audio
            // policy); PSTN only tags the attributes for call redirection.
            if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_VOIP) {
                return buildCallInjectionTrack();
            } else if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_PSTN) {
                mAttributes = new AudioAttributes.Builder(mAttributes)
                        .setForCallRedirection()
                        .build();
            }

            if (mOffload) {
                // Offload and low latency are incompatible requests; verify the format is
                // supported on a direct path before attempting construction.
                if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
                    throw new UnsupportedOperationException(
                            "Offload and low latency modes are incompatible");
                }
                if (AudioSystem.getDirectPlaybackSupport(mFormat, mAttributes)
                        == AudioSystem.DIRECT_NOT_SUPPORTED) {
                    throw new UnsupportedOperationException(
                            "Cannot create AudioTrack, offload format / attributes not supported");
                }
            }

            // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?

            // If the buffer size is not specified in streaming mode,
            // use a single frame for the buffer size and let the
            // native code figure out the minimum buffer size.
            if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                int bytesPerSample = 1;
                if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) {
                    try {
                        bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding());
                    } catch (IllegalArgumentException e) {
                        // do nothing
                    }
                }
                mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample;
            }

            try {
                final AudioTrack track = new AudioTrack(
                        mContext, mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId,
                        mOffload, mEncapsulationMode, mTunerConfiguration);
                if (track.getState() == STATE_UNINITIALIZED) {
                    // release is not necessary
                    throw new UnsupportedOperationException("Cannot create AudioTrack");
                }
                return track;
            } catch (IllegalArgumentException e) {
                // Surface constructor argument problems as the documented exception type.
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
1467     }
1468 
    /**
     * Sets an {@link AudioPolicy} to automatically unregister when the track is released.
     *
     * <p>This is to prevent users of the call audio injection API from having to manually
     * unregister the policy that was used to create the track.
     * @param audioPolicy the policy whose lifetime should follow this track's.
     */
    private void unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy) {
        // Kept so the policy can be unregistered when this track is released.
        mAudioPolicy = audioPolicy;
    }
1478 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     * @throws IllegalArgumentException if either frame count is negative.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Runtime enforcement of the @IntRange(from = 0) contracts.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values for the getters, then push them to the native layer.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1509 
    /**
     * Return the decoder delay of an offloaded track, expressed in frames, previously set with
     * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
     * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
     * This value can only be queried on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
     * @return decoder delay expressed in frames.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public @IntRange(from = 0) int getOffloadDelay() {
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of delay on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of delay on uninitialized track");
        }
        // Java-side cached value, written by setOffloadDelayPadding().
        return mOffloadDelayFrames;
    }
1527 
    /**
     * Return the decoder padding of an offloaded track, expressed in frames, previously set with
     * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
     * <p>This padding indicates the number of frames to be ignored at the end of the stream.
     * This value can only be queried on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
     * @return decoder padding expressed in frames.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public @IntRange(from = 0) int getOffloadPadding() {
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of padding on uninitialized track");
        }
        // Java-side cached value, written by setOffloadDelayPadding().
        return mOffloadPaddingFrames;
    }
1545 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@see #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     * @throws IllegalStateException if the track is not offloaded, not initialized, not
     *     playing, or has no registered StreamEventCallback.
     */
    public void setOffloadEndOfStream() {
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // A stream event callback must be registered so the EOS completion can be observed.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // Stop the native track and mark EOS pending; the play state moves to STOPPING.
        // Note mStreamEventCbLock is released before mPlayStateLock is taken — the two
        // locks are never held simultaneously here.
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1578 
    /**
     * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
     * to {@code true}.
     * @return true if the track is using offloaded playback.
     */
    public boolean isOffloadedPlayback() {
        return mOffloaded;
    }
1587 
1588     /**
1589      * Returns whether direct playback of an audio format with the provided attributes is
1590      * currently supported on the system.
1591      * <p>Direct playback means that the audio stream is not resampled or downmixed
1592      * by the framework. Checking for direct support can help the app select the representation
1593      * of audio content that most closely matches the capabilities of the device and peripherials
1594      * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
1595      * or mixed with other streams, if needed.
1596      * <p>Also note that this query only provides information about the support of an audio format.
1597      * It does not indicate whether the resources necessary for the playback are available
1598      * at that instant.
1599      * @param format a non-null {@link AudioFormat} instance describing the format of
1600      *   the audio data.
1601      * @param attributes a non-null {@link AudioAttributes} instance.
1602      * @return true if the given audio format can be played directly.
1603      * @deprecated Use {@link AudioManager#getDirectPlaybackSupport(AudioFormat, AudioAttributes)}
1604      *             instead.
1605      */
1606     @Deprecated
isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1607     public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
1608             @NonNull AudioAttributes attributes) {
1609         if (format == null) {
1610             throw new IllegalArgumentException("Illegal null AudioFormat argument");
1611         }
1612         if (attributes == null) {
1613             throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1614         }
1615         return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
1616                 format.getChannelMask(), format.getChannelIndexMask(),
1617                 attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
1618     }
1619 
    /*
     * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 binary32 float.
     * This means fractions must be divisible by a power of 2. For example,
     * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressible by
     * a finite binary fraction.
     *
     * 48.f is the nominal max for API level {@link android.os.Build.VERSION_CODES#R}.
     * We use this to suggest a baseline range for implementation.
     *
     * The API contract specification allows increasing this value in a future
     * API release, but not decreasing this value.
     */
    private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1633 
isValidAudioDescriptionMixLevel(float level)1634     private static boolean isValidAudioDescriptionMixLevel(float level) {
1635         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1636     }
1637 
1638     /**
1639      * Sets the Audio Description mix level in dB.
1640      *
1641      * For AudioTracks incorporating a secondary Audio Description stream
1642      * (where such contents may be sent through an Encapsulation Mode
1643      * other than {@link #ENCAPSULATION_MODE_NONE}).
1644      * or internally by a HW channel),
1645      * the level of mixing of the Audio Description to the Main Audio stream
1646      * is controlled by this method.
1647      *
1648      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1649      *
1650      * @param level a floating point value between
1651      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1652      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1653      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1654      * @return true on success, false on failure.
1655      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1656     public boolean setAudioDescriptionMixLeveldB(
1657             @FloatRange(to = 48.f, toInclusive = true) float level) {
1658         if (!isValidAudioDescriptionMixLevel(level)) {
1659             throw new IllegalArgumentException("level is out of range" + level);
1660         }
1661         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1662     }
1663 
1664     /**
1665      * Returns the Audio Description mix level in dB.
1666      *
1667      * If Audio Description mixing is unavailable from the hardware device,
1668      * a value of {@code Float.NEGATIVE_INFINITY} is returned.
1669      *
1670      * @return the current Audio Description Mix Level in dB.
1671      *     A value of {@code Float.NEGATIVE_INFINITY} means
1672      *     that the audio description is not mixed or
1673      *     the hardware is not available.
1674      *     This should reflect the <strong>true</strong> internal device mix level;
1675      *     hence the application might receive any floating value
1676      *     except {@code Float.NaN}.
1677      */
getAudioDescriptionMixLeveldB()1678     public float getAudioDescriptionMixLeveldB() {
1679         float[] level = { Float.NEGATIVE_INFINITY };
1680         try {
1681             final int status = native_get_audio_description_mix_level_db(level);
1682             if (status != SUCCESS || Float.isNaN(level[0])) {
1683                 return Float.NEGATIVE_INFINITY;
1684             }
1685         } catch (Exception e) {
1686             return Float.NEGATIVE_INFINITY;
1687         }
1688         return level[0];
1689     }
1690 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1691     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1692         switch (dualMonoMode) {
1693             case DUAL_MONO_MODE_OFF:
1694             case DUAL_MONO_MODE_LR:
1695             case DUAL_MONO_MODE_LL:
1696             case DUAL_MONO_MODE_RR:
1697                 return true;
1698             default:
1699                 return false;
1700         }
1701     }
1702 
1703     /**
1704      * Sets the Dual Mono mode presentation on the output device.
1705      *
1706      * The Dual Mono mode is generally applied to stereo audio streams
1707      * where the left and right channels come from separate sources.
1708      *
1709      * For compressed audio, where the decoding is done in hardware,
1710      * Dual Mono presentation needs to be performed
1711      * by the hardware output device
1712      * as the PCM audio is not available to the framework.
1713      *
1714      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1715      *     {@link #DUAL_MONO_MODE_LR},
1716      *     {@link #DUAL_MONO_MODE_LL},
1717      *     {@link #DUAL_MONO_MODE_RR}.
1718      *
1719      * @return true on success, false on failure if the output device
1720      *     does not support Dual Mono mode.
1721      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1722     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1723         if (!isValidDualMonoMode(dualMonoMode)) {
1724             throw new IllegalArgumentException(
1725                     "Invalid Dual Mono mode " + dualMonoMode);
1726         }
1727         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1728     }
1729 
1730     /**
1731      * Returns the Dual Mono mode presentation setting.
1732      *
1733      * If no Dual Mono presentation is available for the output device,
1734      * then {@link #DUAL_MONO_MODE_OFF} is returned.
1735      *
1736      * @return one of {@link #DUAL_MONO_MODE_OFF},
1737      *     {@link #DUAL_MONO_MODE_LR},
1738      *     {@link #DUAL_MONO_MODE_LL},
1739      *     {@link #DUAL_MONO_MODE_RR}.
1740      */
getDualMonoMode()1741     public @DualMonoMode int getDualMonoMode() {
1742         int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
1743         try {
1744             final int status = native_get_dual_mono_mode(dualMonoMode);
1745             if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
1746                 return DUAL_MONO_MODE_OFF;
1747             }
1748         } catch (Exception e) {
1749             return DUAL_MONO_MODE_OFF;
1750         }
1751         return dualMonoMode[0];
1752     }
1753 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and
    // AudioSystem.OUT_CHANNEL_COUNT_MAX
    // NOTE(review): consulted by isMultichannelConfigSupported() — the validation logic
    // itself lives outside this section.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2 |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT;
1784 
1785     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1786     // power saving to be automatically enabled for an AudioTrack. Returns false if
1787     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1788     private static boolean shouldEnablePowerSaving(
1789             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1790             int bufferSizeInBytes, int mode) {
1791         // If no attributes, OK
1792         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1793         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1794         // FLAG_DEEP_BUFFER because if set the request is explicit and
1795         // shouldEnablePowerSaving() should return false.
1796         final int flags = attributes.getAllFlags()
1797                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1798                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1799 
1800         if (attributes != null &&
1801                 (flags != 0  // cannot have any special flags
1802                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1803                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1804                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_SPEECH
1805                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1806                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1807             return false;
1808         }
1809 
1810         // Format must be fully specified and be linear pcm
1811         if (format == null
1812                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1813                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1814                 || !AudioFormat.isValidEncoding(format.getEncoding())
1815                 || format.getChannelCount() < 1) {
1816             return false;
1817         }
1818 
1819         // Mode must be streaming
1820         if (mode != MODE_STREAM) {
1821             return false;
1822         }
1823 
1824         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1825         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1826         if (bufferSizeInBytes != 0) {
1827             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1828             final int MILLIS_PER_SECOND = 1000;
1829             final long bufferTargetSize =
1830                     BUFFER_TARGET_MODE_STREAM_MS
1831                     * format.getChannelCount()
1832                     * format.getBytesPerSample(format.getEncoding())
1833                     * format.getSampleRate()
1834                     / MILLIS_PER_SECOND;
1835             if (bufferSizeInBytes < bufferTargetSize) {
1836                 return false;
1837             }
1838         }
1839 
1840         return true;
1841     }
1842 
1843     // Convenience method for the constructor's parameter checks.
1844     // This is where constructor IllegalArgumentException-s are thrown
1845     // postconditions:
1846     //    mChannelCount is valid
1847     //    mChannelMask is valid
1848     //    mAudioFormat is valid
1849     //    mSampleRate is valid
1850     //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is accepted so the rate can be resolved later.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 transport is expected to be 2 or 8 channels; warn (do not fail, for app
        // compatibility) on any other mask.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO
                && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) {
            Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig
                    + ", which is not 2 or 8 channels");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
                throw new IllegalArgumentException(
                        "Unsupported channel mask configuration " + channelConfig
                        + " for encoding " + audioFormat);
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // As of S, we accept up to 24 channel index mask.
            final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1;
            final int channelIndexCount = Integer.bitCount(channelIndexMask);
            final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0
                    && (!AudioFormat.isEncodingLinearFrames(audioFormat)  // compressed OK
                            || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM
            if (!accepted) {
                throw new IllegalArgumentException(
                        "Unsupported channel index mask configuration " + channelIndexMask
                        + " for encoding " + audioFormat);
            }
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // Both a position mask and an index mask were given; they must agree on count.
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1939 
1940     // General pair map
    // General pair map.
    // Maps a human-readable pair name to the bitwise OR of that pair's
    // left/right channel position masks. Consulted by
    // isMultichannelConfigSupported() to reject configurations that use only
    // one half of a left/right pair.
    private static final Map<String, Integer> CHANNEL_PAIR_MAP = Map.of(
            "front", AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT,
            "back", AudioFormat.CHANNEL_OUT_BACK_LEFT
                    | AudioFormat.CHANNEL_OUT_BACK_RIGHT,
            "front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER,
            "side", AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT,
            "top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT,
            "top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT,
            "top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT,
            "bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT,
            "front wide", AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT);
1959 
1960     /**
1961      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1962      * @param channelConfig the mask to validate
1963      * @return false if the AudioTrack can't be used with such a mask
1964      */
isMultichannelConfigSupported(int channelConfig, int encoding)1965     private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) {
1966         // check for unsupported channels
1967         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1968             loge("Channel configuration features unsupported channels");
1969             return false;
1970         }
1971         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1972         final int channelCountLimit;
1973         try {
1974             channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding)
1975                     ? AudioSystem.OUT_CHANNEL_COUNT_MAX  // PCM limited to OUT_CHANNEL_COUNT_MAX
1976                     : AudioSystem.FCC_24;                // Compressed limited to 24 channels
1977         } catch (IllegalArgumentException iae) {
1978             loge("Unsupported encoding " + iae);
1979             return false;
1980         }
1981         if (channelCount > channelCountLimit) {
1982             loge("Channel configuration contains too many channels for encoding "
1983                     + encoding + "(" + channelCount + " > " + channelCountLimit + ")");
1984             return false;
1985         }
1986         // check for unsupported multichannel combinations:
1987         // - FL/FR must be present
1988         // - L/R channels must be paired (e.g. no single L channel)
1989         final int frontPair =
1990                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1991         if ((channelConfig & frontPair) != frontPair) {
1992                 loge("Front channels must be present in multichannel configurations");
1993                 return false;
1994         }
1995         // Check all pairs to see that they are matched (front duplicated here).
1996         for (Map.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) {
1997             final int positionPair = e.getValue();
1998             if ((channelConfig & positionPair) != 0
1999                     && (channelConfig & positionPair) != positionPair) {
2000                 loge("Channel pair (" + e.getKey() + ") cannot be used independently");
2001                 return false;
2002             }
2003         }
2004         return true;
2005     }
2006 
2007 
2008     // Convenience method for the constructor's audio buffer size check.
2009     // preconditions:
2010     //    mChannelCount is valid
2011     //    mAudioFormat is valid
2012     // postcondition:
2013     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)2014     private void audioBuffSizeCheck(int audioBufferSize) {
2015         // NB: this section is only valid with PCM or IEC61937 data.
2016         //     To update when supporting compressed formats
2017         int frameSizeInBytes;
2018         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
2019             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
2020         } else {
2021             frameSizeInBytes = 1;
2022         }
2023         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
2024             throw new IllegalArgumentException("Invalid audio buffer size.");
2025         }
2026 
2027         mNativeBufferSizeInBytes = audioBufferSize;
2028         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
2029     }
2030 
2031 
    /**
     * Releases the native AudioTrack resources.
     * Safe to call in any state; errors during teardown are intentionally
     * swallowed so release always completes.
     */
    public void release() {
        // Stop delivering stream events before tearing anything down.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        // Unregister any audio policy this track registered (see mAudioPolicy).
        if (mAudioPolicy != null) {
            AudioManager.unregisterAudioPolicyAsyncStatic(mAudioPolicy);
            mAudioPolicy = null;
        }

        baseRelease();
        native_release();
        // Mark the track unusable and wake any thread waiting on the play state.
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            mPlayStateLock.notify();
        }
    }
2059 
    @Override
    protected void finalize() {
        // Best-effort cleanup if the app never called release():
        // detach the native routing callback, then free native resources.
        tryToDisableNativeRoutingCallback();
        baseRelease();
        native_finalize();
    }
2066 
2067     //--------------------------------------------------------------------------
2068     // Getters
2069     //--------------------
2070     /**
2071      * Returns the minimum gain value, which is the constant 0.0.
2072      * Gain values less than 0.0 will be clamped to 0.0.
2073      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2074      * @return the minimum value, which is the constant 0.0.
2075      */
getMinVolume()2076     static public float getMinVolume() {
2077         return GAIN_MIN;
2078     }
2079 
2080     /**
2081      * Returns the maximum gain value, which is greater than or equal to 1.0.
2082      * Gain values greater than the maximum will be clamped to the maximum.
2083      * <p>The word "volume" in the API name is historical; this is actually a gain.
2084      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
2085      * corresponds to a gain of 0 dB (sample values left unmodified).
2086      * @return the maximum value, which is greater than or equal to 1.0.
2087      */
getMaxVolume()2088     static public float getMaxVolume() {
2089         return GAIN_MAX;
2090     }
2091 
2092     /**
2093      * Returns the configured audio source sample rate in Hz.
2094      * The initial source sample rate depends on the constructor parameters,
2095      * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
2096      * If the constructor had a specific sample rate, then the initial sink sample rate is that
2097      * value.
2098      * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
2099      * then the initial sink sample rate is a route-dependent default value based on the source [sic].
2100      */
getSampleRate()2101     public int getSampleRate() {
2102         return mSampleRate;
2103     }
2104 
2105     /**
2106      * Returns the current playback sample rate rate in Hz.
2107      */
getPlaybackRate()2108     public int getPlaybackRate() {
2109         return native_get_playback_rate();
2110     }
2111 
2112     /**
2113      * Returns the current playback parameters.
2114      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
2115      * @return current {@link PlaybackParams}.
2116      * @throws IllegalStateException if track is not initialized.
2117      */
getPlaybackParams()2118     public @NonNull PlaybackParams getPlaybackParams() {
2119         return native_get_playback_params();
2120     }
2121 
2122     /**
2123      * Returns the {@link AudioAttributes} used in configuration.
2124      * If a {@code streamType} is used instead of an {@code AudioAttributes}
2125      * to configure the AudioTrack
2126      * (the use of {@code streamType} for configuration is deprecated),
2127      * then the {@code AudioAttributes}
2128      * equivalent to the {@code streamType} is returned.
2129      * @return The {@code AudioAttributes} used to configure the AudioTrack.
2130      * @throws IllegalStateException If the track is not initialized.
2131      */
getAudioAttributes()2132     public @NonNull AudioAttributes getAudioAttributes() {
2133         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
2134             throw new IllegalStateException("track not initialized");
2135         }
2136         return mConfiguredAudioAttributes;
2137     }
2138 
2139     /**
2140      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
2141      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2142      */
getAudioFormat()2143     public int getAudioFormat() {
2144         return mAudioFormat;
2145     }
2146 
2147     /**
2148      * Returns the volume stream type of this AudioTrack.
2149      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
2150      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
2151      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
2152      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
2153      * {@link AudioManager#STREAM_ACCESSIBILITY}.
2154      */
getStreamType()2155     public int getStreamType() {
2156         return mStreamType;
2157     }
2158 
2159     /**
2160      * Returns the configured channel position mask.
2161      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
2162      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
2163      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
2164      * a channel index mask was used. Consider
2165      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
2166      * which contains both the channel position mask and the channel index mask.
2167      */
getChannelConfiguration()2168     public int getChannelConfiguration() {
2169         return mChannelConfiguration;
2170     }
2171 
2172     /**
2173      * Returns the configured <code>AudioTrack</code> format.
2174      * @return an {@link AudioFormat} containing the
2175      * <code>AudioTrack</code> parameters at the time of configuration.
2176      */
getFormat()2177     public @NonNull AudioFormat getFormat() {
2178         AudioFormat.Builder builder = new AudioFormat.Builder()
2179             .setSampleRate(mSampleRate)
2180             .setEncoding(mAudioFormat);
2181         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
2182             builder.setChannelMask(mChannelConfiguration);
2183         }
2184         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
2185             builder.setChannelIndexMask(mChannelIndexMask);
2186         }
2187         return builder.build();
2188     }
2189 
2190     /**
2191      * Returns the configured number of channels.
2192      */
getChannelCount()2193     public int getChannelCount() {
2194         return mChannelCount;
2195     }
2196 
2197     /**
2198      * Returns the state of the AudioTrack instance. This is useful after the
2199      * AudioTrack instance has been created to check if it was initialized
2200      * properly. This ensures that the appropriate resources have been acquired.
2201      * @see #STATE_UNINITIALIZED
2202      * @see #STATE_INITIALIZED
2203      * @see #STATE_NO_STATIC_DATA
2204      */
getState()2205     public int getState() {
2206         return mState;
2207     }
2208 
2209     /**
2210      * Returns the playback state of the AudioTrack instance.
2211      * @see #PLAYSTATE_STOPPED
2212      * @see #PLAYSTATE_PAUSED
2213      * @see #PLAYSTATE_PLAYING
2214      */
getPlayState()2215     public int getPlayState() {
2216         synchronized (mPlayStateLock) {
2217             switch (mPlayState) {
2218                 case PLAYSTATE_STOPPING:
2219                     return PLAYSTATE_PLAYING;
2220                 case PLAYSTATE_PAUSED_STOPPING:
2221                     return PLAYSTATE_PAUSED;
2222                 default:
2223                     return mPlayState;
2224             }
2225         }
2226     }
2227 
2228 
2229     /**
2230      * Returns the effective size of the <code>AudioTrack</code> buffer
2231      * that the application writes to.
2232      * <p> This will be less than or equal to the result of
2233      * {@link #getBufferCapacityInFrames()}.
2234      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
2235      * <p> If the track is subsequently routed to a different output sink, the buffer
2236      * size and capacity may enlarge to accommodate.
2237      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2238      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2239      * the size of the <code>AudioTrack</code> buffer in bytes.
2240      * <p> See also {@link AudioManager#getProperty(String)} for key
2241      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2242      * @return current size in frames of the <code>AudioTrack</code> buffer.
2243      * @throws IllegalStateException if track is not initialized.
2244      */
getBufferSizeInFrames()2245     public @IntRange (from = 0) int getBufferSizeInFrames() {
2246         return native_get_buffer_size_frames();
2247     }
2248 
2249     /**
2250      * Limits the effective size of the <code>AudioTrack</code> buffer
2251      * that the application writes to.
2252      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2253      * If a blocking write is used then the write will block until the data
2254      * can fit within this limit.
2255      * <p>Changing this limit modifies the latency associated with
2256      * the buffer for this track. A smaller size will give lower latency
2257      * but there may be more glitches due to buffer underruns.
2258      * <p>The actual size used may not be equal to this requested size.
2259      * It will be limited to a valid range with a maximum of
2260      * {@link #getBufferCapacityInFrames()}.
2261      * It may also be adjusted slightly for internal reasons.
2262      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2263      * will be returned.
2264      * <p>This method is supported for PCM audio at all API levels.
2265      * Compressed audio is supported in API levels 33 and above.
2266      * For compressed streams the size of a frame is considered to be exactly one byte.
2267      *
2268      * @param bufferSizeInFrames requested buffer size in frames
2269      * @return the actual buffer size in frames or an error code,
2270      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2271      * @throws IllegalStateException if track is not initialized.
2272      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2273     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2274         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2275             return ERROR_INVALID_OPERATION;
2276         }
2277         if (bufferSizeInFrames < 0) {
2278             return ERROR_BAD_VALUE;
2279         }
2280         return native_set_buffer_size_frames(bufferSizeInFrames);
2281     }
2282 
2283     /**
2284      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
2285      *  <p> If the track's creation mode is {@link #MODE_STATIC},
2286      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
2287      *  A static track's frame count will not change.
2288      *  <p> If the track's creation mode is {@link #MODE_STREAM},
2289      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
2290      *  For streaming tracks, this value may be rounded up to a larger value if needed by
2291      *  the target output sink, and
2292      *  if the track is subsequently routed to a different output sink, the
2293      *  frame count may enlarge to accommodate.
2294      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2295      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2296      *  the size of the <code>AudioTrack</code> buffer in bytes.
2297      *  <p> See also {@link AudioManager#getProperty(String)} for key
2298      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2299      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
2300      *  @throws IllegalStateException if track is not initialized.
2301      */
getBufferCapacityInFrames()2302     public @IntRange (from = 0) int getBufferCapacityInFrames() {
2303         return native_get_buffer_capacity_frames();
2304     }
2305 
2306     /**
2307      * Sets the streaming start threshold for an <code>AudioTrack</code>.
2308      * <p> The streaming start threshold is the buffer level that the written audio
2309      * data must reach for audio streaming to start after {@link #play()} is called.
2310      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2311      *
2312      * @param startThresholdInFrames the desired start threshold.
2313      * @return the actual start threshold in frames value. This is
2314      *         an integer between 1 to the buffer capacity
2315      *         (see {@link #getBufferCapacityInFrames()}),
2316      *         and might change if the output sink changes after track creation.
2317      * @throws IllegalStateException if the track is not initialized or the
2318      *         track transfer mode is not {@link #MODE_STREAM}.
2319      * @throws IllegalArgumentException if startThresholdInFrames is not positive.
2320      * @see #getStartThresholdInFrames()
2321      */
setStartThresholdInFrames( @ntRange from = 1) int startThresholdInFrames)2322     public @IntRange(from = 1) int setStartThresholdInFrames(
2323             @IntRange (from = 1) int startThresholdInFrames) {
2324         if (mState != STATE_INITIALIZED) {
2325             throw new IllegalStateException("AudioTrack is not initialized");
2326         }
2327         if (mDataLoadMode != MODE_STREAM) {
2328             throw new IllegalStateException("AudioTrack must be a streaming track");
2329         }
2330         if (startThresholdInFrames < 1) {
2331             throw new IllegalArgumentException("startThresholdInFrames "
2332                     + startThresholdInFrames + " must be positive");
2333         }
2334         return native_setStartThresholdInFrames(startThresholdInFrames);
2335     }
2336 
2337     /**
2338      * Returns the streaming start threshold of the <code>AudioTrack</code>.
2339      * <p> The streaming start threshold is the buffer level that the written audio
2340      * data must reach for audio streaming to start after {@link #play()} is called.
2341      * When an <code>AudioTrack</code> is created, the streaming start threshold
2342      * is the buffer capacity in frames. If the buffer size in frames is reduced
2343      * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold
2344      * then that value will be used instead for the streaming start threshold.
2345      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2346      *
2347      * @return the current start threshold in frames value. This is
2348      *         an integer between 1 to the buffer capacity
2349      *         (see {@link #getBufferCapacityInFrames()}),
2350      *         and might change if the  output sink changes after track creation.
2351      * @throws IllegalStateException if the track is not initialized or the
2352      *         track is not {@link #MODE_STREAM}.
2353      * @see #setStartThresholdInFrames(int)
2354      */
getStartThresholdInFrames()2355     public @IntRange (from = 1) int getStartThresholdInFrames() {
2356         if (mState != STATE_INITIALIZED) {
2357             throw new IllegalStateException("AudioTrack is not initialized");
2358         }
2359         if (mDataLoadMode != MODE_STREAM) {
2360             throw new IllegalStateException("AudioTrack must be a streaming track");
2361         }
2362         return native_getStartThresholdInFrames();
2363     }
2364 
2365     /**
2366      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
2367      *  @return current size in frames of the <code>AudioTrack</code> buffer.
2368      *  @throws IllegalStateException
2369      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
2370      */
2371     @Deprecated
getNativeFrameCount()2372     protected int getNativeFrameCount() {
2373         return native_get_buffer_capacity_frames();
2374     }
2375 
2376     /**
2377      * Returns marker position expressed in frames.
2378      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
2379      * or zero if marker is disabled.
2380      */
getNotificationMarkerPosition()2381     public int getNotificationMarkerPosition() {
2382         return native_get_marker_pos();
2383     }
2384 
2385     /**
2386      * Returns the notification update period expressed in frames.
2387      * Zero means that no position update notifications are being delivered.
2388      */
getPositionNotificationPeriod()2389     public int getPositionNotificationPeriod() {
2390         return native_get_pos_update_period();
2391     }
2392 
2393     /**
2394      * Returns the playback head position expressed in frames.
2395      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
2396      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
2397      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
2398      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
2399      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
2400      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
2401      * the total number of frames played since reset,
2402      * <i>not</i> the current offset within the buffer.
2403      */
getPlaybackHeadPosition()2404     public int getPlaybackHeadPosition() {
2405         return native_get_position();
2406     }
2407 
2408     /**
2409      * Returns this track's estimated latency in milliseconds. This includes the latency due
2410      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
2411      *
2412      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
2413      * a better solution.
2414      * @hide
2415      */
2416     @UnsupportedAppUsage(trackingBug = 130237544)
getLatency()2417     public int getLatency() {
2418         return native_get_latency();
2419     }
2420 
2421     /**
2422      * Returns the number of underrun occurrences in the application-level write buffer
2423      * since the AudioTrack was created.
2424      * An underrun occurs if the application does not write audio
2425      * data quickly enough, causing the buffer to underflow
2426      * and a potential audio glitch or pop.
2427      * <p>
2428      * Underruns are less likely when buffer sizes are large.
2429      * It may be possible to eliminate underruns by recreating the AudioTrack with
2430      * a larger buffer.
2431      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
2432      * effective size of the buffer.
2433      */
getUnderrunCount()2434     public int getUnderrunCount() {
2435         return native_get_underrun_count();
2436     }
2437 
2438     /**
2439      * Returns the current performance mode of the {@link AudioTrack}.
2440      *
2441      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2442      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2443      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2444      * Use {@link AudioTrack.Builder#setPerformanceMode}
2445      * in the {@link AudioTrack.Builder} to enable a performance mode.
2446      * @throws IllegalStateException if track is not initialized.
2447      */
getPerformanceMode()2448     public @PerformanceMode int getPerformanceMode() {
2449         final int flags = native_get_flags();
2450         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2451             return PERFORMANCE_MODE_LOW_LATENCY;
2452         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2453             return PERFORMANCE_MODE_POWER_SAVING;
2454         } else {
2455             return PERFORMANCE_MODE_NONE;
2456         }
2457     }
2458 
2459     /**
2460      *  Returns the output sample rate in Hz for the specified stream type.
2461      */
getNativeOutputSampleRate(int streamType)2462     static public int getNativeOutputSampleRate(int streamType) {
2463         return native_get_output_sample_rate(streamType);
2464     }
2465 
2466     /**
2467      * Returns the estimated minimum buffer size required for an AudioTrack
2468      * object to be created in the {@link #MODE_STREAM} mode.
2469      * The size is an estimate because it does not consider either the route or the sink,
2470      * since neither is known yet.  Note that this size doesn't
2471      * guarantee a smooth playback under load, and higher values should be chosen according to
2472      * the expected frequency at which the buffer will be refilled with additional data to play.
2473      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2474      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2475      * based on the highest planned sample rate.
2476      * @param sampleRateInHz the source sample rate expressed in Hz.
2477      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2478      * @param channelConfig describes the configuration of the audio channels.
2479      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2480      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2481      * @param audioFormat the format in which the audio data is represented.
2482      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2483      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2484      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2485      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2486      *   or {@link #ERROR} if unable to query for output properties,
2487      *   or the minimum buffer size expressed in bytes.
2488      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2489     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2490         int channelCount = 0;
2491         switch(channelConfig) {
2492         case AudioFormat.CHANNEL_OUT_MONO:
2493         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2494             channelCount = 1;
2495             break;
2496         case AudioFormat.CHANNEL_OUT_STEREO:
2497         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2498             channelCount = 2;
2499             break;
2500         default:
2501             if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
2502                 loge("getMinBufferSize(): Invalid channel configuration.");
2503                 return ERROR_BAD_VALUE;
2504             } else {
2505                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2506             }
2507         }
2508 
2509         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2510             loge("getMinBufferSize(): Invalid audio format.");
2511             return ERROR_BAD_VALUE;
2512         }
2513 
2514         // sample rate, note these values are subject to change
2515         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2516         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2517                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2518             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2519             return ERROR_BAD_VALUE;
2520         }
2521 
2522         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2523         if (size <= 0) {
2524             loge("getMinBufferSize(): error querying hardware");
2525             return ERROR;
2526         }
2527         else {
2528             return size;
2529         }
2530     }
2531 
2532     /**
2533      * Returns the audio session ID.
2534      *
2535      * @return the ID of the audio session this AudioTrack belongs to.
2536      */
getAudioSessionId()2537     public int getAudioSessionId() {
2538         return mSessionId;
2539     }
2540 
2541    /**
2542     * Poll for a timestamp on demand.
2543     * <p>
2544     * If you need to track timestamps during initial warmup or after a routing or mode change,
2545     * you should request a new timestamp periodically until the reported timestamps
2546     * show that the frame position is advancing, or until it becomes clear that
2547     * timestamps are unavailable for this route.
2548     * <p>
2549     * After the clock is advancing at a stable rate,
2550     * query for a new timestamp approximately once every 10 seconds to once per minute.
2551     * Calling this method more often is inefficient.
2552     * It is also counter-productive to call this method more often than recommended,
2553     * because the short-term differences between successive timestamp reports are not meaningful.
2554     * If you need a high-resolution mapping between frame position and presentation time,
2555     * consider implementing that at application level, based on low-resolution timestamps.
2556     * <p>
2557     * The audio data at the returned position may either already have been
2558     * presented, or may have not yet been presented but is committed to be presented.
2559     * It is not possible to request the time corresponding to a particular position,
2560     * or to request the (fractional) position corresponding to a particular time.
2561     * If you need such features, consider implementing them at application level.
2562     *
2563     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2564     *        and owned by caller.
2565     * @return true if a timestamp is available, or false if no timestamp is available.
2566     *         If a timestamp is available,
2567     *         the AudioTimestamp instance is filled in with a position in frame units, together
2568     *         with the estimated time when that frame was presented or is committed to
2569     *         be presented.
2570     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2571     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2572     *         or during and immediately after a route change.
2573     *         A timestamp is permanently unavailable for a given route if the route does not support
2574     *         timestamps.  In this case, the approximate frame position can be obtained
2575     *         using {@link #getPlaybackHeadPosition}.
2576     *         However, it may be useful to continue to query for
2577     *         timestamps occasionally, to recover after a route change.
2578     */
2579     // Add this text when the "on new timestamp" API is added:
2580     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2581     public boolean getTimestamp(AudioTimestamp timestamp)
2582     {
2583         if (timestamp == null) {
2584             throw new IllegalArgumentException();
2585         }
2586         // It's unfortunate, but we have to either create garbage every time or use synchronized
2587         long[] longArray = new long[2];
2588         int ret = native_get_timestamp(longArray);
2589         if (ret != SUCCESS) {
2590             return false;
2591         }
2592         timestamp.framePosition = longArray[0];
2593         timestamp.nanoTime = longArray[1];
2594         return true;
2595     }
2596 
2597     /**
2598      * Poll for a timestamp on demand.
2599      * <p>
2600      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2601      *
2602      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2603      *        and owned by caller.
2604      * @return {@link #SUCCESS} if a timestamp is available
2605      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2606      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2607      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2608      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2609      *         for the timestamp.
2610      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2611      *         needs to be recreated.
2612      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2613      *         timestamps. In this case, the approximate frame position can be obtained
2614      *         using {@link #getPlaybackHeadPosition}.
2615      *
2616      *         The AudioTimestamp instance is filled in with a position in frame units, together
2617      *         with the estimated time when that frame was presented or is committed to
2618      *         be presented.
2619      * @hide
2620      */
2621      // Add this text when the "on new timestamp" API is added:
2622      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2623      public int getTimestampWithStatus(AudioTimestamp timestamp)
2624      {
2625          if (timestamp == null) {
2626              throw new IllegalArgumentException();
2627          }
2628          // It's unfortunate, but we have to either create garbage every time or use synchronized
2629          long[] longArray = new long[2];
2630          int ret = native_get_timestamp(longArray);
2631          timestamp.framePosition = longArray[0];
2632          timestamp.nanoTime = longArray[1];
2633          return ret;
2634      }
2635 
2636     /**
2637      *  Return Metrics data about the current AudioTrack instance.
2638      *
2639      * @return a {@link PersistableBundle} containing the set of attributes and values
2640      * available for the media being handled by this instance of AudioTrack
2641      * The attributes are descibed in {@link MetricsConstants}.
2642      *
2643      * Additional vendor-specific fields may also be present in
2644      * the return value.
2645      */
getMetrics()2646     public PersistableBundle getMetrics() {
2647         PersistableBundle bundle = native_getMetrics();
2648         return bundle;
2649     }
2650 
native_getMetrics()2651     private native PersistableBundle native_getMetrics();
2652 
2653     //--------------------------------------------------------------------------
2654     // Initialization / configuration
2655     //--------------------
2656     /**
2657      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2658      * for each periodic playback head position update.
2659      * Notifications will be received in the same thread as the one in which the AudioTrack
2660      * instance was created.
2661      * @param listener
2662      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)2663     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
2664         setPlaybackPositionUpdateListener(listener, null);
2665     }
2666 
2667     /**
2668      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2669      * for each periodic playback head position update.
2670      * Use this method to receive AudioTrack events in the Handler associated with another
2671      * thread than the one in which you created the AudioTrack instance.
2672      * @param listener
2673      * @param handler the Handler that will receive the event notification messages.
2674      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2675     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2676                                                     Handler handler) {
2677         if (listener != null) {
2678             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2679         } else {
2680             mEventHandlerDelegate = null;
2681         }
2682     }
2683 
2684 
clampGainOrLevel(float gainOrLevel)2685     private static float clampGainOrLevel(float gainOrLevel) {
2686         if (Float.isNaN(gainOrLevel)) {
2687             throw new IllegalArgumentException();
2688         }
2689         if (gainOrLevel < GAIN_MIN) {
2690             gainOrLevel = GAIN_MIN;
2691         } else if (gainOrLevel > GAIN_MAX) {
2692             gainOrLevel = GAIN_MAX;
2693         }
2694         return gainOrLevel;
2695     }
2696 
2697 
2698      /**
2699      * Sets the specified left and right output gain values on the AudioTrack.
2700      * <p>Gain values are clamped to the closed interval [0.0, max] where
2701      * max is the value of {@link #getMaxVolume}.
2702      * A value of 0.0 results in zero gain (silence), and
2703      * a value of 1.0 means unity gain (signal unchanged).
2704      * The default value is 1.0 meaning unity gain.
2705      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2706      * @param leftGain output gain for the left channel.
2707      * @param rightGain output gain for the right channel
2708      * @return error code or success, see {@link #SUCCESS},
2709      *    {@link #ERROR_INVALID_OPERATION}
2710      * @deprecated Applications should use {@link #setVolume} instead, as it
2711      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2712      */
2713     @Deprecated
setStereoVolume(float leftGain, float rightGain)2714     public int setStereoVolume(float leftGain, float rightGain) {
2715         if (mState == STATE_UNINITIALIZED) {
2716             return ERROR_INVALID_OPERATION;
2717         }
2718 
2719         baseSetVolume(leftGain, rightGain);
2720         return SUCCESS;
2721     }
2722 
2723     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2724     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2725         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2726         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2727 
2728         native_setVolume(leftVolume, rightVolume);
2729     }
2730 
2731 
2732     /**
2733      * Sets the specified output gain value on all channels of this track.
2734      * <p>Gain values are clamped to the closed interval [0.0, max] where
2735      * max is the value of {@link #getMaxVolume}.
2736      * A value of 0.0 results in zero gain (silence), and
2737      * a value of 1.0 means unity gain (signal unchanged).
2738      * The default value is 1.0 meaning unity gain.
2739      * <p>This API is preferred over {@link #setStereoVolume}, as it
2740      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2741      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2742      * @param gain output gain for all channels.
2743      * @return error code or success, see {@link #SUCCESS},
2744      *    {@link #ERROR_INVALID_OPERATION}
2745      */
setVolume(float gain)2746     public int setVolume(float gain) {
2747         return setStereoVolume(gain, gain);
2748     }
2749 
    @Override
    // PlayerBase hook: delegates shaper application to the native track.
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        return native_applyVolumeShaper(configuration, operation);
    }
2756 
    @Override
    // PlayerBase hook: fetches the state of the shaper with the given id from
    // the native layer; may be null.
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        return native_getVolumeShaperState(id);
    }
2761 
    @Override
    // Creates a VolumeShaper bound to this track; the shaper drives the track
    // through the package-private playerApplyVolumeShaper/playerGetVolumeShaperState
    // hooks.
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        return new VolumeShaper(configuration, this);
    }
2767 
2768     /**
2769      * Sets the playback sample rate for this track. This sets the sampling rate at which
2770      * the audio data will be consumed and played back
2771      * (as set by the sampleRateInHz parameter in the
2772      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2773      * not the original sampling rate of the
2774      * content. For example, setting it to half the sample rate of the content will cause the
2775      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2776      * The valid sample rate range is from 1 Hz to twice the value returned by
2777      * {@link #getNativeOutputSampleRate(int)}.
2778      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2779      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2780      * for playback of content of differing sample rate,
2781      * but with identical encoding and channel mask.
2782      * @param sampleRateInHz the sample rate expressed in Hz
2783      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2784      *    {@link #ERROR_INVALID_OPERATION}
2785      */
setPlaybackRate(int sampleRateInHz)2786     public int setPlaybackRate(int sampleRateInHz) {
2787         if (mState != STATE_INITIALIZED) {
2788             return ERROR_INVALID_OPERATION;
2789         }
2790         if (sampleRateInHz <= 0) {
2791             return ERROR_BAD_VALUE;
2792         }
2793         return native_set_playback_rate(sampleRateInHz);
2794     }
2795 
2796 
2797     /**
2798      * Sets the playback parameters.
2799      * This method returns failure if it cannot apply the playback parameters.
2800      * One possible cause is that the parameters for speed or pitch are out of range.
2801      * Another possible cause is that the <code>AudioTrack</code> is streaming
2802      * (see {@link #MODE_STREAM}) and the
2803      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
2804      * on configuration must be larger than the speed multiplied by the minimum size
2805      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
2806      * @param params see {@link PlaybackParams}. In particular,
2807      * speed, pitch, and audio mode should be set.
2808      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
2809      * @throws IllegalStateException if track is not initialized.
2810      */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit runtime check in addition to @NonNull: the annotation is not
        // enforced at runtime, and the documented contract is
        // IllegalArgumentException for invalid parameters.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        // Native layer validates speed/pitch ranges and buffer constraints, and
        // throws on rejection (see the javadoc above).
        native_set_playback_params(params);
    }
2817 
2818 
2819     /**
2820      * Sets the position of the notification marker.  At most one marker can be active.
2821      * @param markerInFrames marker position in wrapping frame units similar to
2822      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2823      * To set a marker at a position which would appear as zero due to wraparound,
2824      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2825      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2826      *  {@link #ERROR_INVALID_OPERATION}
2827      */
setNotificationMarkerPosition(int markerInFrames)2828     public int setNotificationMarkerPosition(int markerInFrames) {
2829         if (mState == STATE_UNINITIALIZED) {
2830             return ERROR_INVALID_OPERATION;
2831         }
2832         return native_set_marker_pos(markerInFrames);
2833     }
2834 
2835 
2836     /**
2837      * Sets the period for the periodic notification event.
2838      * @param periodInFrames update period expressed in frames.
2839      * Zero period means no position updates.  A negative period is not allowed.
2840      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2841      */
setPositionNotificationPeriod(int periodInFrames)2842     public int setPositionNotificationPeriod(int periodInFrames) {
2843         if (mState == STATE_UNINITIALIZED) {
2844             return ERROR_INVALID_OPERATION;
2845         }
2846         return native_set_pos_update_period(periodInFrames);
2847     }
2848 
2849 
2850     /**
2851      * Sets the playback head position within the static buffer.
2852      * The track must be stopped or paused for the position to be changed,
2853      * and must use the {@link #MODE_STATIC} mode.
2854      * @param positionInFrames playback head position within buffer, expressed in frames.
2855      * Zero corresponds to start of buffer.
2856      * The position must not be greater than the buffer size in frames, or negative.
2857      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2858      * the position values have different meanings.
2859      * <br>
2860      * If looping is currently enabled and the new position is greater than or equal to the
2861      * loop end marker, the behavior varies by API level:
2862      * as of {@link android.os.Build.VERSION_CODES#M},
2863      * the looping is first disabled and then the position is set.
2864      * For earlier API levels, the behavior is unspecified.
2865      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2866      *    {@link #ERROR_INVALID_OPERATION}
2867      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2868     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2869         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2870                 getPlayState() == PLAYSTATE_PLAYING) {
2871             return ERROR_INVALID_OPERATION;
2872         }
2873         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2874             return ERROR_BAD_VALUE;
2875         }
2876         return native_set_position(positionInFrames);
2877     }
2878 
2879     /**
2880      * Sets the loop points and the loop count. The loop can be infinite.
2881      * Similarly to setPlaybackHeadPosition,
2882      * the track must be stopped or paused for the loop points to be changed,
2883      * and must use the {@link #MODE_STATIC} mode.
2884      * @param startInFrames loop start marker expressed in frames.
2885      * Zero corresponds to start of buffer.
2886      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2887      * @param endInFrames loop end marker expressed in frames.
2888      * The total buffer size in frames corresponds to end of buffer.
2889      * The end marker must not be greater than the buffer size in frames.
2890      * For looping, the end marker must not be less than or equal to the start marker,
2891      * but to disable looping
2892      * it is permitted for start marker, end marker, and loop count to all be 0.
2893      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2894      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2895      * support,
2896      * {@link #ERROR_BAD_VALUE} is returned.
2897      * The loop range is the interval [startInFrames, endInFrames).
2898      * <br>
2899      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2900      * unless it is greater than or equal to the loop end marker, in which case
2901      * it is forced to the loop start marker.
2902      * For earlier API levels, the effect on position is unspecified.
2903      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2904      *    A value of -1 means infinite looping, and 0 disables looping.
2905      *    A value of positive N means to "loop" (go back) N times.  For example,
2906      *    a value of one means to play the region two times in total.
2907      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2908      *    {@link #ERROR_INVALID_OPERATION}
2909      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2910     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2911             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2912         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2913                 getPlayState() == PLAYSTATE_PLAYING) {
2914             return ERROR_INVALID_OPERATION;
2915         }
2916         if (loopCount == 0) {
2917             ;   // explicitly allowed as an exception to the loop region range check
2918         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2919                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2920             return ERROR_BAD_VALUE;
2921         }
2922         return native_set_loop(startInFrames, endInFrames, loopCount);
2923     }
2924 
2925     /**
2926      * Sets the audio presentation.
2927      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2928      * If a multi-stream decoder (MSD) is not present, or the format does not support
2929      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2930      * {@link #ERROR} is returned in case of any other error.
2931      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2932      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2933      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2934      * @throws IllegalArgumentException if the audio presentation is null.
2935      * @throws IllegalStateException if track is not initialized.
2936      */
setPresentation(@onNull AudioPresentation presentation)2937     public int setPresentation(@NonNull AudioPresentation presentation) {
2938         if (presentation == null) {
2939             throw new IllegalArgumentException("audio presentation is null");
2940         }
2941         return native_setPresentation(presentation.getPresentationId(),
2942                 presentation.getProgramId());
2943     }
2944 
2945     /**
2946      * Sets the initialization state of the instance. This method was originally intended to be used
2947      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
2948      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
2949      * @param state the state of the AudioTrack instance
2950      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
2951      */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the lifecycle state; no validation is performed.
        mState = state;
    }
2956 
2957 
2958     //---------------------------------------------------------
2959     // Transport control methods
2960     //--------------------
2961     /**
2962      * Starts playing an AudioTrack.
2963      * <p>
2964      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2965      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2966      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2967      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2968      * play().
2969      * <p>
2970      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2971      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2972      * If you don't call write() first, or if you call write() but with an insufficient amount of
2973      * data, then the track will be in underrun state at play().  In this case,
2974      * playback will not actually start playing until the data path is filled to a
2975      * device-specific minimum level.  This requirement for the path to be filled
2976      * to a minimum level is also true when resuming audio playback after calling stop().
2977      * Similarly the buffer will need to be filled up again after
2978      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2979      * For portability, an application should prime the data path to the maximum allowed
2980      * by writing data until the write() method returns a short transfer count.
2981      * This allows play() to start immediately, and reduces the chance of underrun.
2982      *<p>
2983      * As of {@link android.os.Build.VERSION_CODES#S} the minimum level to start playing
2984      * can be obtained using {@link #getStartThresholdInFrames()} and set with
2985      * {@link #setStartThresholdInFrames(int)}.
2986      *
2987      * @throws IllegalStateException if the track isn't properly initialized
2988      */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        //FIXME use lambda to pass startImpl to superclass
        // A non-zero start delay (managed by the player base class) defers the
        // actual start to a helper thread; zero means start synchronously.
        final int delay = getStartDelayMs();
        if (delay == 0) {
            startImpl();
        } else {
            // NOTE(review): an interrupt during the sleep is only printed, so the
            // track still starts (possibly early) after an interrupted wait.
            new Thread() {
                public void run() {
                    try {
                        Thread.sleep(delay);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    // Clear the delay so subsequent play() calls start immediately.
                    baseSetStartDelayMs(0);
                    try {
                        startImpl();
                    } catch (IllegalStateException e) {
                        // fail silently for a state exception when it is happening after
                        // a delayed start, as the player state could have changed between the
                        // call to start() and the execution of startImpl()
                    }
                }
            }.start();
        }
    }
3018 
    private void startImpl() {
        // Lazily enable native routing callbacks for self-monitoring, guarded by
        // the routing listeners lock.
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        // Play-state transitions happen under mPlayStateLock.
        synchronized(mPlayStateLock) {
            baseStart(new int[0]); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            // A track paused mid-stop resumes the stop sequence (STOPPING);
            // otherwise it becomes PLAYING and any pending offload EOS is cleared.
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }
3038 
3039     /**
3040      * Stops playing the audio data.
3041      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
3042      * after the last buffer that was written has been played. For an immediate stop, use
3043      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
3044      * back yet.
3045      * @throws IllegalStateException
3046      */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            // Offloaded tracks do not stop immediately: report STOPPING (unless
            // already pausing out of a stop) — presumably until the native layer
            // signals stream end; see mOffloadEosPending.
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                // Reset A/V sync bookkeeping for the next play session.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // Wake a thread waiting on the play state under mPlayStateLock.
                mPlayStateLock.notify();
            }
        }
        tryToDisableNativeRoutingCallback();
    }
3069 
3070     /**
3071      * Pauses the playback of the audio data. Data that has not been played
3072      * back will not be discarded. Subsequent calls to {@link #play} will play
3073      * this data back. See {@link #flush()} to discard this data.
3074      *
3075      * @throws IllegalStateException
3076      */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // pause playback
        synchronized(mPlayStateLock) {
            native_pause();
            basePause();
            // Pausing a track that is still draining after stop() keeps the stop
            // pending (PAUSED_STOPPING); otherwise it is simply PAUSED.
            if (mPlayState == PLAYSTATE_STOPPING) {
                mPlayState = PLAYSTATE_PAUSED_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PAUSED;
            }
        }
    }
3094 
3095 
3096     //---------------------------------------------------------
3097     // Audio data supply
3098     //--------------------
3099 
3100     /**
3101      * Flushes the audio data currently queued for playback. Any data that has
3102      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
3103      * or if the track's creation mode is not {@link #MODE_STREAM}.
3104      * <BR> Note that although data written but not yet presented is discarded, there is no
3105      * guarantee that all of the buffer space formerly used by that data
3106      * is available for a subsequent write.
3107      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
3108      * less than or equal to the total buffer size
3109      * may return a short actual transfer count.
3110      */
flush()3111     public void flush() {
3112         if (mState == STATE_INITIALIZED) {
3113             // flush the data in native layer
3114             native_flush();
3115             mAvSyncHeader = null;
3116             mAvSyncBytesRemaining = 0;
3117         }
3118 
3119     }
3120 
3121     /**
3122      * Writes the audio data to the audio sink for playback (streaming mode),
3123      * or copies audio data for later playback (static buffer mode).
3124      * The format specified in the AudioTrack constructor should be
3125      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
3126      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
3127      * <p>
3128      * In streaming mode, the write will normally block until all the data has been enqueued for
3129      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3130      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3131      * occurs during the write, then the write may return a short transfer count.
3132      * <p>
3133      * In static buffer mode, copies the data to the buffer starting at offset 0.
3134      * Note that the actual playback of this data might occur after this function returns.
3135      *
3136      * @param audioData the array that holds the data to play.
3137      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3138      *    starts.
3139      *    Must not be negative, or cause the data access to go out of bounds of the array.
3140      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3141      *    Must not be negative, or cause the data access to go out of bounds of the array.
3142      * @return zero or the positive number of bytes that were written, or one of the following
3143      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3144      *    not to exceed sizeInBytes.
3145      * <ul>
3146      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3147      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3148      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3149      *    needs to be recreated. The dead object error code is not returned if some data was
3150      *    successfully transferred. In this case, the error is returned at the next write()</li>
3151      * <li>{@link #ERROR} in case of other error</li>
3152      * </ul>
3153      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
3154      * set to  {@link #WRITE_BLOCKING}.
3155      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes)3156     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
3157         return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
3158     }
3159 
3160     /**
3161      * Writes the audio data to the audio sink for playback (streaming mode),
3162      * or copies audio data for later playback (static buffer mode).
3163      * The format specified in the AudioTrack constructor should be
3164      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
3165      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
3166      * <p>
3167      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3168      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3169      * for playback, and will return a full transfer count.  However, if the write mode is
3170      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3171      * interrupts the write by calling stop or pause, or an I/O error
3172      * occurs during the write, then the write may return a short transfer count.
3173      * <p>
3174      * In static buffer mode, copies the data to the buffer starting at offset 0,
3175      * and the write mode is ignored.
3176      * Note that the actual playback of this data might occur after this function returns.
3177      *
3178      * @param audioData the array that holds the data to play.
3179      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3180      *    starts.
3181      *    Must not be negative, or cause the data access to go out of bounds of the array.
3182      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3183      *    Must not be negative, or cause the data access to go out of bounds of the array.
3184      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3185      *     effect in static mode.
3186      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3187      *         to the audio sink.
3188      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3189      *     queuing as much audio data for playback as possible without blocking.
3190      * @return zero or the positive number of bytes that were written, or one of the following
3191      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3192      *    not to exceed sizeInBytes.
3193      * <ul>
3194      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3195      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3196      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3197      *    needs to be recreated. The dead object error code is not returned if some data was
3198      *    successfully transferred. In this case, the error is returned at the next write()</li>
3199      * <li>{@link #ERROR} in case of other error</li>
3200      * </ul>
3201      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)3202     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
3203             @WriteMode int writeMode) {
3204         // Note: we allow writes of extended integers and compressed formats from a byte array.
3205         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
3206             return ERROR_INVALID_OPERATION;
3207         }
3208 
3209         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3210             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3211             return ERROR_BAD_VALUE;
3212         }
3213 
3214         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
3215                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
3216                 || (offsetInBytes + sizeInBytes > audioData.length)) {
3217             return ERROR_BAD_VALUE;
3218         }
3219 
3220         if (!blockUntilOffloadDrain(writeMode)) {
3221             return 0;
3222         }
3223 
3224         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
3225                 writeMode == WRITE_BLOCKING);
3226 
3227         if ((mDataLoadMode == MODE_STATIC)
3228                 && (mState == STATE_NO_STATIC_DATA)
3229                 && (ret > 0)) {
3230             // benign race with respect to other APIs that read mState
3231             mState = STATE_INITIALIZED;
3232         }
3233 
3234         return ret;
3235     }
3236 
3237     /**
3238      * Writes the audio data to the audio sink for playback (streaming mode),
3239      * or copies audio data for later playback (static buffer mode).
3240      * The format specified in the AudioTrack constructor should be
3241      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3242      * <p>
3243      * In streaming mode, the write will normally block until all the data has been enqueued for
3244      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3245      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3246      * occurs during the write, then the write may return a short transfer count.
3247      * <p>
3248      * In static buffer mode, copies the data to the buffer starting at offset 0.
3249      * Note that the actual playback of this data might occur after this function returns.
3250      *
3251      * @param audioData the array that holds the data to play.
3252      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
3253      *     starts.
3254      *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
3257      * @return zero or the positive number of shorts that were written, or one of the following
3258      *    error codes. The number of shorts will be a multiple of the channel count not to
3259      *    exceed sizeInShorts.
3260      * <ul>
3261      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3262      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3263      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3264      *    needs to be recreated. The dead object error code is not returned if some data was
3265      *    successfully transferred. In this case, the error is returned at the next write()</li>
3266      * <li>{@link #ERROR} in case of other error</li>
3267      * </ul>
3268      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
3269      * set to  {@link #WRITE_BLOCKING}.
3270      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)3271     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
3272         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
3273     }
3274 
3275     /**
3276      * Writes the audio data to the audio sink for playback (streaming mode),
3277      * or copies audio data for later playback (static buffer mode).
3278      * The format specified in the AudioTrack constructor should be
3279      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3280      * <p>
3281      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3282      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3283      * for playback, and will return a full transfer count.  However, if the write mode is
3284      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3285      * interrupts the write by calling stop or pause, or an I/O error
3286      * occurs during the write, then the write may return a short transfer count.
3287      * <p>
3288      * In static buffer mode, copies the data to the buffer starting at offset 0.
3289      * Note that the actual playback of this data might occur after this function returns.
3290      *
3291      * @param audioData the array that holds the data to write.
3292      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
3293      *     starts.
3294      *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
3297      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3298      *     effect in static mode.
3299      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3300      *         to the audio sink.
3301      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3302      *     queuing as much audio data for playback as possible without blocking.
3303      * @return zero or the positive number of shorts that were written, or one of the following
3304      *    error codes. The number of shorts will be a multiple of the channel count not to
3305      *    exceed sizeInShorts.
3306      * <ul>
3307      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3308      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3309      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3310      *    needs to be recreated. The dead object error code is not returned if some data was
3311      *    successfully transferred. In this case, the error is returned at the next write()</li>
3312      * <li>{@link #ERROR} in case of other error</li>
3313      * </ul>
3314      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3315     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3316             @WriteMode int writeMode) {
3317 
3318         if (mState == STATE_UNINITIALIZED
3319                 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT
3320                 // use ByteBuffer or byte[] instead for later encodings
3321                 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) {
3322             return ERROR_INVALID_OPERATION;
3323         }
3324 
3325         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3326             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3327             return ERROR_BAD_VALUE;
3328         }
3329 
3330         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3331                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3332                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3333             return ERROR_BAD_VALUE;
3334         }
3335 
3336         if (!blockUntilOffloadDrain(writeMode)) {
3337             return 0;
3338         }
3339 
3340         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3341                 writeMode == WRITE_BLOCKING);
3342 
3343         if ((mDataLoadMode == MODE_STATIC)
3344                 && (mState == STATE_NO_STATIC_DATA)
3345                 && (ret > 0)) {
3346             // benign race with respect to other APIs that read mState
3347             mState = STATE_INITIALIZED;
3348         }
3349 
3350         return ret;
3351     }
3352 
3353     /**
3354      * Writes the audio data to the audio sink for playback (streaming mode),
3355      * or copies audio data for later playback (static buffer mode).
3356      * The format specified in the AudioTrack constructor should be
3357      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3358      * <p>
3359      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3360      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3361      * for playback, and will return a full transfer count.  However, if the write mode is
3362      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3363      * interrupts the write by calling stop or pause, or an I/O error
3364      * occurs during the write, then the write may return a short transfer count.
3365      * <p>
3366      * In static buffer mode, copies the data to the buffer starting at offset 0,
3367      * and the write mode is ignored.
3368      * Note that the actual playback of this data might occur after this function returns.
3369      *
3370      * @param audioData the array that holds the data to write.
3371      *     The implementation does not clip for sample values within the nominal range
3372      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3373      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3374      *     that could add energy, such as reverb.  For the convenience of applications
3375      *     that compute samples using filters with non-unity gain,
3376      *     sample values +3 dB beyond the nominal range are permitted.
3377      *     However such values may eventually be limited or clipped, depending on various gains
3378      *     and later processing in the audio path.  Therefore applications are encouraged
3379      *     to provide samples values within the nominal range.
3380      * @param offsetInFloats the offset, expressed as a number of floats,
3381      *     in audioData where the data to write starts.
3382      *    Must not be negative, or cause the data access to go out of bounds of the array.
3383      * @param sizeInFloats the number of floats to write in audioData after the offset.
3384      *    Must not be negative, or cause the data access to go out of bounds of the array.
3385      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3386      *     effect in static mode.
3387      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3388      *         to the audio sink.
3389      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3390      *     queuing as much audio data for playback as possible without blocking.
3391      * @return zero or the positive number of floats that were written, or one of the following
3392      *    error codes. The number of floats will be a multiple of the channel count not to
3393      *    exceed sizeInFloats.
3394      * <ul>
3395      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3396      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3397      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3398      *    needs to be recreated. The dead object error code is not returned if some data was
3399      *    successfully transferred. In this case, the error is returned at the next write()</li>
3400      * <li>{@link #ERROR} in case of other error</li>
3401      * </ul>
3402      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3403     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3404             @WriteMode int writeMode) {
3405 
3406         if (mState == STATE_UNINITIALIZED) {
3407             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3408             return ERROR_INVALID_OPERATION;
3409         }
3410 
3411         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3412             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3413             return ERROR_INVALID_OPERATION;
3414         }
3415 
3416         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3417             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3418             return ERROR_BAD_VALUE;
3419         }
3420 
3421         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3422                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3423                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3424             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3425             return ERROR_BAD_VALUE;
3426         }
3427 
3428         if (!blockUntilOffloadDrain(writeMode)) {
3429             return 0;
3430         }
3431 
3432         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3433                 writeMode == WRITE_BLOCKING);
3434 
3435         if ((mDataLoadMode == MODE_STATIC)
3436                 && (mState == STATE_NO_STATIC_DATA)
3437                 && (ret > 0)) {
3438             // benign race with respect to other APIs that read mState
3439             mState = STATE_INITIALIZED;
3440         }
3441 
3442         return ret;
3443     }
3444 
3445 
3446     /**
3447      * Writes the audio data to the audio sink for playback (streaming mode),
3448      * or copies audio data for later playback (static buffer mode).
3449      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3450      * <p>
3451      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3452      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3453      * for playback, and will return a full transfer count.  However, if the write mode is
3454      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3455      * interrupts the write by calling stop or pause, or an I/O error
3456      * occurs during the write, then the write may return a short transfer count.
3457      * <p>
3458      * In static buffer mode, copies the data to the buffer starting at offset 0,
3459      * and the write mode is ignored.
3460      * Note that the actual playback of this data might occur after this function returns.
3461      *
3462      * @param audioData the buffer that holds the data to write, starting at the position reported
3463      *     by <code>audioData.position()</code>.
3464      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3465      *     have been advanced to reflect the amount of data that was successfully written to
3466      *     the AudioTrack.
3467      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3468      *     that the number of bytes requested be a multiple of the frame size (sample size in
3469      *     bytes multiplied by the channel count).
3470      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3471      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3472      *     effect in static mode.
3473      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3474      *         to the audio sink.
3475      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3476      *     queuing as much audio data for playback as possible without blocking.
3477      * @return zero or the positive number of bytes that were written, or one of the following
3478      *    error codes.
3479      * <ul>
3480      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3481      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3482      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3483      *    needs to be recreated. The dead object error code is not returned if some data was
3484      *    successfully transferred. In this case, the error is returned at the next write()</li>
3485      * <li>{@link #ERROR} in case of other error</li>
3486      * </ul>
3487      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3488     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3489             @WriteMode int writeMode) {
3490 
3491         if (mState == STATE_UNINITIALIZED) {
3492             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3493             return ERROR_INVALID_OPERATION;
3494         }
3495 
3496         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3497             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3498             return ERROR_BAD_VALUE;
3499         }
3500 
3501         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3502             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3503             return ERROR_BAD_VALUE;
3504         }
3505 
3506         if (!blockUntilOffloadDrain(writeMode)) {
3507             return 0;
3508         }
3509 
3510         int ret = 0;
3511         if (audioData.isDirect()) {
3512             ret = native_write_native_bytes(audioData,
3513                     audioData.position(), sizeInBytes, mAudioFormat,
3514                     writeMode == WRITE_BLOCKING);
3515         } else {
3516             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3517                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3518                     sizeInBytes, mAudioFormat,
3519                     writeMode == WRITE_BLOCKING);
3520         }
3521 
3522         if ((mDataLoadMode == MODE_STATIC)
3523                 && (mState == STATE_NO_STATIC_DATA)
3524                 && (ret > 0)) {
3525             // benign race with respect to other APIs that read mState
3526             mState = STATE_INITIALIZED;
3527         }
3528 
3529         if (ret > 0) {
3530             audioData.position(audioData.position() + ret);
3531         }
3532 
3533         return ret;
3534     }
3535 
3536     /**
3537      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
3538      * The blocking behavior will depend on the write mode.
3539      * @param audioData the buffer that holds the data to write, starting at the position reported
3540      *     by <code>audioData.position()</code>.
3541      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3542      *     have been advanced to reflect the amount of data that was successfully written to
3543      *     the AudioTrack.
3544      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3545      *     that the number of bytes requested be a multiple of the frame size (sample size in
3546      *     bytes multiplied by the channel count).
3547      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3548      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
3549      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3550      *         to the audio sink.
3551      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3552      *     queuing as much audio data for playback as possible without blocking.
3553      * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
3554      *     provided audioData.
3555      * @return zero or the positive number of bytes that were written, or one of the following
3556      *    error codes.
3557      * <ul>
3558      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3559      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3560      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3561      *    needs to be recreated. The dead object error code is not returned if some data was
3562      *    successfully transferred. In this case, the error is returned at the next write()</li>
3563      * <li>{@link #ERROR} in case of other error</li>
3564      * </ul>
3565      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        // Basic state/mode validation, mirroring the non-timestamped overloads.
        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // On a track without HW_AV_SYNC, the timestamp is silently dropped and
        // the call degrades to a plain ByteBuffer write.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // In offload STOPPING state a non-blocking write returns 0 immediately.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        // Big-endian header starting with the magic word 0x55550002; the buffer
        // capacity is mOffset (presumably the total header length — also stored
        // at byte offset 16 below; TODO confirm against the native contract).
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Start a new access unit only when the previous one was fully written:
        // payload size at offset 4, timestamp at offset 8, header length at 16.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        // A partial header write returns 0 so the caller retries; the header
        // buffer's position tracks how much has been consumed across calls.
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // On error, drop the in-flight header so the next write starts fresh.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the bytes remaining in the current access unit.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3643 
3644 
3645     /**
3646      * Sets the playback head position within the static buffer to zero,
3647      * that is it rewinds to start of static buffer.
3648      * The track must be stopped or paused, and
3649      * the track's creation mode must be {@link #MODE_STATIC}.
3650      * <p>
3651      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3652      * {@link #getPlaybackHeadPosition()} to zero.
3653      * For earlier API levels, the reset behavior is unspecified.
3654      * <p>
3655      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3656      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3657      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3658      *  {@link #ERROR_INVALID_OPERATION}
3659      */
reloadStaticData()3660     public int reloadStaticData() {
3661         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3662             return ERROR_INVALID_OPERATION;
3663         }
3664         return native_reload_static();
3665     }
3666 
3667     /**
3668      * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
3669      * received if blocking write or return with 0 frames written if non blocking mode.
3670      */
blockUntilOffloadDrain(int writeMode)3671     private boolean blockUntilOffloadDrain(int writeMode) {
3672         synchronized (mPlayStateLock) {
3673             while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
3674                 if (writeMode == WRITE_NON_BLOCKING) {
3675                     return false;
3676                 }
3677                 try {
3678                     mPlayStateLock.wait();
3679                 } catch (InterruptedException e) {
3680                 }
3681             }
3682             return true;
3683         }
3684     }
3685 
3686     //--------------------------------------------------------------------------
3687     // Audio effects management
3688     //--------------------
3689 
3690     /**
3691      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3692      * effect is a reverberation effect which can be applied on any sound source
3693      * that directs a certain amount of its energy to this effect. This amount
3694      * is defined by setAuxEffectSendLevel().
     * {@link #setAuxEffectSendLevel(float)}.
3696      * <p>After creating an auxiliary effect (e.g.
3697      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3698      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3699      * this method to attach the audio track to the effect.
3700      * <p>To detach the effect from the audio track, call this method with a
3701      * null effect id.
3702      *
3703      * @param effectId system wide unique id of the effect to attach
3704      * @return error code or success, see {@link #SUCCESS},
3705      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3706      */
attachAuxEffect(int effectId)3707     public int attachAuxEffect(int effectId) {
3708         if (mState == STATE_UNINITIALIZED) {
3709             return ERROR_INVALID_OPERATION;
3710         }
3711         return native_attachAuxEffect(effectId);
3712     }
3713 
3714     /**
3715      * Sets the send level of the audio track to the attached auxiliary effect
3716      * {@link #attachAuxEffect(int)}.  Effect levels
3717      * are clamped to the closed interval [0.0, max] where
3718      * max is the value of {@link #getMaxVolume}.
3719      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3720      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3721      * this method must be called for the effect to be applied.
3722      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3723      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3724      * so an appropriate conversion from linear UI input x to level is:
3725      * x == 0 -&gt; level = 0
3726      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3727      *
3728      * @param level linear send level
3729      * @return error code or success, see {@link #SUCCESS},
3730      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3731      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3732     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3733         if (mState == STATE_UNINITIALIZED) {
3734             return ERROR_INVALID_OPERATION;
3735         }
3736         return baseSetAuxEffectSendLevel(level);
3737     }
3738 
3739     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3740     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3741         level = clampGainOrLevel(muting ? 0.0f : level);
3742         int err = native_setAuxEffectSendLevel(level);
3743         return err == 0 ? SUCCESS : ERROR;
3744     }
3745 
    //--------------------------------------------------------------------------
    // Explicit Routing
    //--------------------
    // Last device successfully applied via setPreferredDevice(); guarded by "this".
    private AudioDeviceInfo mPreferredDevice = null;
3750 
3751     /**
3752      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3753      * the output from this AudioTrack.
3754      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3755      *  If deviceInfo is null, default routing is restored.
3756      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3757      * does not correspond to a valid audio output device.
3758      */
3759     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3760     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3761         // Do some validation....
3762         if (deviceInfo != null && !deviceInfo.isSink()) {
3763             return false;
3764         }
3765         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3766         boolean status = native_setOutputDevice(preferredDeviceId);
3767         if (status == true) {
3768             synchronized (this) {
3769                 mPreferredDevice = deviceInfo;
3770             }
3771         }
3772         return status;
3773     }
3774 
    /**
     * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
     * is not guaranteed to correspond to the actual device being used for playback.
     *
     * @return the last device successfully set with {@link #setPreferredDevice}, or null if
     *     default routing is in effect.
     */
    @Override
    public AudioDeviceInfo getPreferredDevice() {
        synchronized (this) {
            return mPreferredDevice;
        }
    }
3785 
3786     /**
3787      * Internal API of getRoutedDevices(). We should not call flag APIs internally.
3788      */
getRoutedDevicesInternal()3789     private @NonNull List<AudioDeviceInfo> getRoutedDevicesInternal() {
3790         List<AudioDeviceInfo> audioDeviceInfos = new ArrayList<AudioDeviceInfo>();
3791         final int[] deviceIds = native_getRoutedDeviceIds();
3792         if (deviceIds == null || deviceIds.length == 0) {
3793             return audioDeviceInfos;
3794         }
3795 
3796         for (int i = 0; i < deviceIds.length; i++) {
3797             AudioDeviceInfo audioDeviceInfo = AudioManager.getDeviceForPortId(deviceIds[i],
3798                     AudioManager.GET_DEVICES_OUTPUTS);
3799             if (audioDeviceInfo != null) {
3800                 audioDeviceInfos.add(audioDeviceInfo);
3801             }
3802         }
3803         return audioDeviceInfos;
3804     }
3805 
3806     /**
3807      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3808      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3809      * <code>getRoutedDevice()</code> will return null.
3810      * Audio may play on multiple devices simultaneously (e.g. an alarm playing on headphones and
3811      * speaker on a phone), so prefer using {@link #getRoutedDevices}.
3812      */
3813     @Override
getRoutedDevice()3814     public AudioDeviceInfo getRoutedDevice() {
3815         final List<AudioDeviceInfo> audioDeviceInfos = getRoutedDevicesInternal();
3816         if (audioDeviceInfos.isEmpty()) {
3817             return null;
3818         }
3819         return audioDeviceInfos.get(0);
3820     }
3821 
    /**
     * Returns a List of {@link AudioDeviceInfo} identifying the current routing of this
     * AudioTrack.
     * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
     * <code>getRoutedDevices()</code> will return an empty list.
     */
    @Override
    @FlaggedApi(FLAG_ROUTED_DEVICE_IDS)
    public @NonNull List<AudioDeviceInfo> getRoutedDevices() {
        // Thin flagged-API wrapper; all work happens in the internal implementation.
        return getRoutedDevicesInternal();
    }
3833 
    /**
     * Turns off the self routing monitor if it was enabled, and disables the native
     * device callback when no app routing listeners remain registered.
     */
    private void tryToDisableNativeRoutingCallback() {
        synchronized (mRoutingChangeListeners) {
            if (mEnableSelfRoutingMonitor) {
                // Clear the flag first so the locked helper sees the updated state.
                mEnableSelfRoutingMonitor = false;
                testDisableNativeRoutingCallbacksLocked();
            }
        }
    }
3842 
3843     /**
3844      * Call BEFORE adding a routing callback handler and when enabling self routing listener
3845      * @return returns true for success, false otherwise.
3846      */
3847     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3848     private boolean testEnableNativeRoutingCallbacksLocked() {
3849         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3850             try {
3851                 native_enableDeviceCallback();
3852                 return true;
3853             } catch (IllegalStateException e) {
3854                 if (Log.isLoggable(TAG, Log.DEBUG)) {
3855                     Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
3856                 }
3857             }
3858         }
3859         return false;
3860     }
3861 
3862     /*
3863      * Call AFTER removing a routing callback handler and when disabling self routing listener.
3864      */
3865     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3866     private void testDisableNativeRoutingCallbacksLocked() {
3867         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3868             try {
3869                 native_disableDeviceCallback();
3870             } catch (IllegalStateException e) {
3871                 // Fail silently as track state could have changed in between stop
3872                 // and disabling routing callback
3873             }
3874         }
3875     }
3876 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    // True while this track itself (as opposed to an app listener) keeps the native
    // device callback enabled; see testEnableNativeRoutingCallbacksLocked().
    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;
3891 
3892    /**
3893     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3894     * changes on this AudioTrack.
3895     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3896     * notifications of rerouting events.
3897     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3898     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3899     * {@link Looper} will be used.
3900     */
3901     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3902     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3903             Handler handler) {
3904         synchronized (mRoutingChangeListeners) {
3905             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3906                 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
3907                 mRoutingChangeListeners.put(
3908                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3909                                 handler != null ? handler : new Handler(mInitializationLooper)));
3910             }
3911         }
3912     }
3913 
3914     /**
3915      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3916      * to receive rerouting notifications.
3917      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3918      * to remove.
3919      */
3920     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3921     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3922         synchronized (mRoutingChangeListeners) {
3923             if (mRoutingChangeListeners.containsKey(listener)) {
3924                 mRoutingChangeListeners.remove(listener);
3925             }
3926             testDisableNativeRoutingCallbacksLocked();
3927         }
3928     }
3929 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        // Bridges the generic AudioRouting callback to the AudioTrack-typed one above.
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
3956 
    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Pure delegation to the AudioRouting-typed overload.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
3973 
    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Pure delegation to the AudioRouting-typed overload.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
3985 
    /**
     * Sends device list change notification to all listeners.
     */
    private void broadcastRoutingChange() {
        // Reset the audio port generation so the routing query below sees fresh state.
        AudioManager.resetAudioPortGeneration();
        baseUpdateDeviceIds(getRoutedDevicesInternal());
        synchronized (mRoutingChangeListeners) {
            for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
                delegate.notifyClient();
            }
        }
    }
3998 
3999     //--------------------------------------------------------------------------
4000     // Codec notifications
4001     //--------------------
4002 
4003     // OnCodecFormatChangedListener notifications uses an instance
4004     // of ListenerList to manage its listeners.
4005 
4006     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
4007             new Utils.ListenerList();
4008 
    /**
     * Interface definition for a listener for codec format changes.
     * Register implementations with
     * {@link #addOnCodecFormatChangedListener(Executor, OnCodecFormatChangedListener)}.
     */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }
4025 
4026     /**
4027      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
4028      * codec format change events on this {@code AudioTrack}.
4029      *
4030      * @param executor  Specifies the {@link Executor} object to control execution.
4031      *
4032      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
4033      *     notifications of codec events.
4034      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)4035     public void addOnCodecFormatChangedListener(
4036             @NonNull @CallbackExecutor Executor executor,
4037             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
4038         mCodecFormatChangedListeners.add(
4039                 listener, /* key for removal */
4040                 executor,
4041                 (int eventCode, AudioMetadataReadMap readMap) -> {
4042                     // eventCode is unused by this implementation.
4043                     listener.onCodecFormatChanged(this, readMap);
4044                 }
4045         );
4046     }
4047 
    /**
     * Removes an {@link OnCodecFormatChangedListener} which has been previously added
     * to receive codec format change events.
     *
     * @param listener The previously added {@link OnCodecFormatChangedListener} interface
     * to remove.
     */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        // The listener instance is the removal key used at registration time.
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }
4059 
    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the AudioTrack whose marker was reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the AudioTrack whose notification period elapsed
         */
        void onPeriodicNotification(AudioTrack track);
    }
4080 
    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     * All methods have empty default implementations so subclasses override only what they need.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
4110 
4111     /**
4112      * Registers a callback for the notification of stream events.
4113      * This callback can only be registered for instances operating in offloaded mode
4114      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
4115      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
4116      * more details).
4117      * @param executor {@link Executor} to handle the callbacks.
4118      * @param eventCallback the callback to receive the stream event notifications.
4119      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)4120     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
4121             @NonNull StreamEventCallback eventCallback) {
4122         if (eventCallback == null) {
4123             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4124         }
4125         if (!mOffloaded) {
4126             throw new IllegalStateException(
4127                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
4128         }
4129         if (executor == null) {
4130             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
4131         }
4132         synchronized (mStreamEventCbLock) {
4133             // check if eventCallback already in list
4134             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4135                 if (seci.mStreamEventCb == eventCallback) {
4136                     throw new IllegalArgumentException(
4137                             "StreamEventCallback already registered");
4138                 }
4139             }
4140             beginStreamEventHandling();
4141             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
4142         }
4143     }
4144 
4145     /**
4146      * Unregisters the callback for notification of stream events, previously registered
4147      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
4148      * @param eventCallback the callback to unregister.
4149      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)4150     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
4151         if (eventCallback == null) {
4152             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4153         }
4154         if (!mOffloaded) {
4155             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
4156         }
4157         synchronized (mStreamEventCbLock) {
4158             StreamEventCbInfo seciToRemove = null;
4159             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4160                 if (seci.mStreamEventCb == eventCallback) {
4161                     // ok to remove while iterating over list as we exit iteration
4162                     mStreamEventCbInfoList.remove(seci);
4163                     if (mStreamEventCbInfoList.size() == 0) {
4164                         endStreamEventHandling();
4165                     }
4166                     return;
4167                 }
4168             }
4169             throw new IllegalArgumentException("StreamEventCallback was not registered");
4170         }
4171     }
4172 
    //---------------------------------------------------------
    // Offload
    //--------------------
    /** Immutable pairing of a StreamEventCallback with the Executor it is dispatched on. */
    private static class StreamEventCbInfo {
        final Executor mStreamEventExec;
        final StreamEventCallback mStreamEventCb;

        StreamEventCbInfo(Executor e, StreamEventCallback cb) {
            mStreamEventExec = e;
            mStreamEventCb = cb;
        }
    }
4185 
    // Protects mStreamEventCbInfoList and the handler thread lifecycle.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // volatile: read from the native callback thread in handleStreamEventFromNative()
    // while written under mStreamEventCbLock.
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
4195 
4196     /**
4197      * Called from native AudioTrack callback thread, filter messages if necessary
4198      * and repost event on AudioTrack message loop to prevent blocking native thread.
4199      * @param what event code received from native
4200      * @param arg optional argument for event
4201      */
handleStreamEventFromNative(int what, int arg)4202     void handleStreamEventFromNative(int what, int arg) {
4203         if (mStreamEventHandler == null) {
4204             return;
4205         }
4206         switch (what) {
4207             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
4208                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
4209                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
4210                 mStreamEventHandler.sendMessage(
4211                         mStreamEventHandler.obtainMessage(
4212                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
4213                 break;
4214             case NATIVE_EVENT_NEW_IAUDIOTRACK:
4215                 mStreamEventHandler.sendMessage(
4216                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
4217                 break;
4218             case NATIVE_EVENT_STREAM_END:
4219                 mStreamEventHandler.sendMessage(
4220                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
4221                 break;
4222         }
4223     }
4224 
    /**
     * Handler bound to the dedicated stream-event Looper; translates reposted native
     * events into StreamEventCallback invocations and drives the offload stop state machine.
     */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                // STREAM_END first updates the play state (and wakes any writer blocked
                // in blockUntilOffloadDrain()) before listeners are notified.
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // A restart was requested while draining: resume playback.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot the list so callbacks are dispatched without holding the lock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Clear the binder calling identity while invoking app-provided callbacks.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's no longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
4281 
4282     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()4283     private void beginStreamEventHandling() {
4284         if (mStreamEventHandlerThread == null) {
4285             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
4286             mStreamEventHandlerThread.start();
4287             final Looper looper = mStreamEventHandlerThread.getLooper();
4288             if (looper != null) {
4289                 mStreamEventHandler = new StreamEventHandler(looper);
4290             }
4291         }
4292     }
4293 
4294     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()4295     private void endStreamEventHandling() {
4296         if (mStreamEventHandlerThread != null) {
4297             mStreamEventHandlerThread.quit();
4298             mStreamEventHandlerThread = null;
4299         }
4300     }
4301 
4302     /**
4303      * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
4304      *
4305      * @param logSessionId a {@link LogSessionId} instance which is used to
4306      *        identify this object to the metrics service. Proper generated
4307      *        Ids must be obtained from the Java metrics service and should
4308      *        be considered opaque. Use
4309      *        {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
4310      *        logSessionId association.
4311      * @throws IllegalStateException if AudioTrack not initialized.
4312      *
4313      */
setLogSessionId(@onNull LogSessionId logSessionId)4314     public void setLogSessionId(@NonNull LogSessionId logSessionId) {
4315         Objects.requireNonNull(logSessionId);
4316         if (mState == STATE_UNINITIALIZED) {
4317             throw new IllegalStateException("track not initialized");
4318         }
4319         String stringId = logSessionId.getStringId();
4320         native_setLogSessionId(stringId);
4321         mLogSessionId = logSessionId;
4322     }
4323 
    /**
     * Returns the {@link LogSessionId}.
     * @return the id last set with {@link #setLogSessionId(LogSessionId)}, or the
     *     field's initial value if never set.
     */
    @NonNull
    public LogSessionId getLogSessionId() {
        return mLogSessionId;
    }
4331 
4332     //---------------------------------------------------------
4333     // Inner classes
4334     //--------------------
4335     /**
4336      * Helper class to handle the forwarding of native events to the appropriate listener
4337      * (potentially) handled in a different thread
4338      */
4339     private class NativePositionEventHandlerDelegate {
4340         private final Handler mHandler;
4341 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)4342         NativePositionEventHandlerDelegate(final AudioTrack track,
4343                                    final OnPlaybackPositionUpdateListener listener,
4344                                    Handler handler) {
4345             // find the looper for our new event handler
4346             Looper looper;
4347             if (handler != null) {
4348                 looper = handler.getLooper();
4349             } else {
4350                 // no given handler, use the looper the AudioTrack was created in
4351                 looper = mInitializationLooper;
4352             }
4353 
4354             // construct the event handler with this looper
4355             if (looper != null) {
4356                 // implement the event handler delegate
4357                 mHandler = new Handler(looper) {
4358                     @Override
4359                     public void handleMessage(Message msg) {
4360                         if (track == null) {
4361                             return;
4362                         }
4363                         switch(msg.what) {
4364                         case NATIVE_EVENT_MARKER:
4365                             if (listener != null) {
4366                                 listener.onMarkerReached(track);
4367                             }
4368                             break;
4369                         case NATIVE_EVENT_NEW_POS:
4370                             if (listener != null) {
4371                                 listener.onPeriodicNotification(track);
4372                             }
4373                             break;
4374                         default:
4375                             loge("Unknown native event type: " + msg.what);
4376                             break;
4377                         }
4378                     }
4379                 };
4380             } else {
4381                 mHandler = null;
4382             }
4383         }
4384 
getHandler()4385         Handler getHandler() {
4386             return mHandler;
4387         }
4388     }
4389 
4390     //---------------------------------------------------------
4391     // Methods for IPlayer interface
4392     //--------------------
    @Override
    void playerStart() {
        // IPlayer entry point: delegates to the public play() API.
        play();
    }
4397 
    @Override
    void playerPause() {
        // IPlayer entry point: delegates to the public pause() API.
        pause();
    }
4402 
    @Override
    void playerStop() {
        // IPlayer entry point: delegates to the public stop() API.
        stop();
    }
4407 
4408     //---------------------------------------------------------
4409     // Java methods called from the native side
4410     //--------------------
4411     @SuppressWarnings("unused")
4412     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)4413     private static void postEventFromNative(Object audiotrack_ref,
4414             int what, int arg1, int arg2, Object obj) {
4415         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
4416         final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
4417         if (track == null) {
4418             return;
4419         }
4420 
4421         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
4422             track.broadcastRoutingChange();
4423             return;
4424         }
4425 
4426         if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
4427             ByteBuffer buffer = (ByteBuffer) obj;
4428             buffer.order(ByteOrder.nativeOrder());
4429             buffer.rewind();
4430             AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
4431             if (audioMetaData == null) {
4432                 Log.e(TAG, "Unable to get audio metadata from byte buffer");
4433                 return;
4434             }
4435             track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
4436             return;
4437         }
4438 
4439         if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
4440                 || what == NATIVE_EVENT_NEW_IAUDIOTRACK
4441                 || what == NATIVE_EVENT_STREAM_END) {
4442             track.handleStreamEventFromNative(what, arg1);
4443             return;
4444         }
4445 
4446         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
4447         if (delegate != null) {
4448             Handler handler = delegate.getHandler();
4449             if (handler != null) {
4450                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
4451                 handler.sendMessage(m);
4452             }
4453         }
4454     }
4455 
4456     //---------------------------------------------------------
4457     // Native methods called from the Java side
4458     //--------------------
4459 
native_is_direct_output_supported(int encoding, int sampleRate, int channelMask, int channelIndexMask, int contentType, int usage, int flags)4460     private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
4461             int channelMask, int channelIndexMask, int contentType, int usage, int flags);
4462 
4463     // post-condition: mStreamType is overwritten with a value
4464     //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
4465     //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
native_setup(Object audiotrack_this, Object attributes, int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat, int buffSizeInBytes, int mode, int[] sessionId, @NonNull Parcel attributionSource, long nativeAudioTrack, boolean offload, int encapsulationMode, Object tunerConfiguration, @NonNull String opPackageName)4466     private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
4467             Object /*AudioAttributes*/ attributes,
4468             int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
4469             int buffSizeInBytes, int mode, int[] sessionId, @NonNull Parcel attributionSource,
4470             long nativeAudioTrack, boolean offload, int encapsulationMode,
4471             Object tunerConfiguration, @NonNull String opPackageName);
4472 
native_finalize()4473     private native final void native_finalize();
4474 
4475     /**
4476      * @hide
4477      */
4478     @UnsupportedAppUsage
native_release()4479     public native final void native_release();
4480 
native_start()4481     private native final void native_start();
4482 
native_stop()4483     private native final void native_stop();
4484 
native_pause()4485     private native final void native_pause();
4486 
native_flush()4487     private native final void native_flush();
4488 
native_write_byte(byte[] audioData, int offsetInBytes, int sizeInBytes, int format, boolean isBlocking)4489     private native final int native_write_byte(byte[] audioData,
4490                                                int offsetInBytes, int sizeInBytes, int format,
4491                                                boolean isBlocking);
4492 
native_write_short(short[] audioData, int offsetInShorts, int sizeInShorts, int format, boolean isBlocking)4493     private native final int native_write_short(short[] audioData,
4494                                                 int offsetInShorts, int sizeInShorts, int format,
4495                                                 boolean isBlocking);
4496 
native_write_float(float[] audioData, int offsetInFloats, int sizeInFloats, int format, boolean isBlocking)4497     private native final int native_write_float(float[] audioData,
4498                                                 int offsetInFloats, int sizeInFloats, int format,
4499                                                 boolean isBlocking);
4500 
native_write_native_bytes(ByteBuffer audioData, int positionInBytes, int sizeInBytes, int format, boolean blocking)4501     private native final int native_write_native_bytes(ByteBuffer audioData,
4502             int positionInBytes, int sizeInBytes, int format, boolean blocking);
4503 
native_reload_static()4504     private native final int native_reload_static();
4505 
native_get_buffer_size_frames()4506     private native final int native_get_buffer_size_frames();
native_set_buffer_size_frames(int bufferSizeInFrames)4507     private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
native_get_buffer_capacity_frames()4508     private native final int native_get_buffer_capacity_frames();
4509 
native_setVolume(float leftVolume, float rightVolume)4510     private native final void native_setVolume(float leftVolume, float rightVolume);
4511 
native_set_playback_rate(int sampleRateInHz)4512     private native final int native_set_playback_rate(int sampleRateInHz);
native_get_playback_rate()4513     private native final int native_get_playback_rate();
4514 
native_set_playback_params(@onNull PlaybackParams params)4515     private native final void native_set_playback_params(@NonNull PlaybackParams params);
native_get_playback_params()4516     private native final @NonNull PlaybackParams native_get_playback_params();
4517 
native_set_marker_pos(int marker)4518     private native final int native_set_marker_pos(int marker);
native_get_marker_pos()4519     private native final int native_get_marker_pos();
4520 
native_set_pos_update_period(int updatePeriod)4521     private native final int native_set_pos_update_period(int updatePeriod);
native_get_pos_update_period()4522     private native final int native_get_pos_update_period();
4523 
native_set_position(int position)4524     private native final int native_set_position(int position);
native_get_position()4525     private native final int native_get_position();
4526 
native_get_latency()4527     private native final int native_get_latency();
4528 
native_get_underrun_count()4529     private native final int native_get_underrun_count();
4530 
native_get_flags()4531     private native final int native_get_flags();
4532 
4533     // longArray must be a non-null array of length >= 2
4534     // [0] is assigned the frame position
4535     // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
native_get_timestamp(long[] longArray)4536     private native final int native_get_timestamp(long[] longArray);
4537 
native_set_loop(int start, int end, int loopCount)4538     private native final int native_set_loop(int start, int end, int loopCount);
4539 
native_get_output_sample_rate(int streamType)4540     static private native final int native_get_output_sample_rate(int streamType);
native_get_min_buff_size( int sampleRateInHz, int channelConfig, int audioFormat)4541     static private native final int native_get_min_buff_size(
4542             int sampleRateInHz, int channelConfig, int audioFormat);
4543 
native_attachAuxEffect(int effectId)4544     private native final int native_attachAuxEffect(int effectId);
native_setAuxEffectSendLevel(float level)4545     private native final int native_setAuxEffectSendLevel(float level);
4546 
native_setOutputDevice(int deviceId)4547     private native final boolean native_setOutputDevice(int deviceId);
native_getRoutedDeviceIds()4548     private native int[] native_getRoutedDeviceIds();
native_enableDeviceCallback()4549     private native final void native_enableDeviceCallback();
native_disableDeviceCallback()4550     private native final void native_disableDeviceCallback();
4551 
native_applyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)4552     private native int native_applyVolumeShaper(
4553             @NonNull VolumeShaper.Configuration configuration,
4554             @NonNull VolumeShaper.Operation operation);
4555 
native_getVolumeShaperState(int id)4556     private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
native_setPresentation(int presentationId, int programId)4557     private native final int native_setPresentation(int presentationId, int programId);
4558 
native_getPortId()4559     private native int native_getPortId();
4560 
native_set_delay_padding(int delayInFrames, int paddingInFrames)4561     private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);
4562 
native_set_audio_description_mix_level_db(float level)4563     private native int native_set_audio_description_mix_level_db(float level);
native_get_audio_description_mix_level_db(float[] level)4564     private native int native_get_audio_description_mix_level_db(float[] level);
native_set_dual_mono_mode(int dualMonoMode)4565     private native int native_set_dual_mono_mode(int dualMonoMode);
native_get_dual_mono_mode(int[] dualMonoMode)4566     private native int native_get_dual_mono_mode(int[] dualMonoMode);
native_setLogSessionId(@ullable String logSessionId)4567     private native void native_setLogSessionId(@Nullable String logSessionId);
native_setStartThresholdInFrames(int startThresholdInFrames)4568     private native int native_setStartThresholdInFrames(int startThresholdInFrames);
native_getStartThresholdInFrames()4569     private native int native_getStartThresholdInFrames();
4570 
4571     /**
4572      * Sets the audio service Player Interface Id.
4573      *
4574      * The playerIId does not change over the lifetime of the client
4575      * Java AudioTrack and is set automatically on creation.
4576      *
4577      * This call informs the native AudioTrack for metrics logging purposes.
4578      *
4579      * @param id the value reported by AudioManager when registering the track.
4580      *           A value of -1 indicates invalid - the playerIId was never set.
4581      * @throws IllegalStateException if AudioTrack not initialized.
4582      */
native_setPlayerIId(int playerIId)4583     private native void native_setPlayerIId(int playerIId);
4584 
4585     //---------------------------------------------------------
4586     // Utility methods
4587     //------------------
4588 
    // Debug-level logging helper using the class-wide TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
4592 
    // Error-level logging helper using the class-wide TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
4596 
4597     public final static class MetricsConstants
4598     {
MetricsConstants()4599         private MetricsConstants() {}
4600 
4601         // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
4602         private static final String MM_PREFIX = "android.media.audiotrack.";
4603 
4604         /**
4605          * Key to extract the stream type for this track
4606          * from the {@link AudioTrack#getMetrics} return value.
4607          * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
4608          * The value is a {@code String}.
4609          */
4610         public static final String STREAMTYPE = MM_PREFIX + "streamtype";
4611 
4612         /**
4613          * Key to extract the attribute content type for this track
4614          * from the {@link AudioTrack#getMetrics} return value.
4615          * The value is a {@code String}.
4616          */
4617         public static final String CONTENTTYPE = MM_PREFIX + "type";
4618 
4619         /**
4620          * Key to extract the attribute usage for this track
4621          * from the {@link AudioTrack#getMetrics} return value.
4622          * The value is a {@code String}.
4623          */
4624         public static final String USAGE = MM_PREFIX + "usage";
4625 
4626         /**
4627          * Key to extract the sample rate for this track in Hz
4628          * from the {@link AudioTrack#getMetrics} return value.
4629          * The value is an {@code int}.
4630          * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
4631          */
4632         @Deprecated
4633         public static final String SAMPLERATE = "android.media.audiorecord.samplerate";
4634 
4635         /**
4636          * Key to extract the native channel mask information for this track
4637          * from the {@link AudioTrack#getMetrics} return value.
4638          *
4639          * The value is a {@code long}.
4640          * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
4641          * the returned format instead.
4642          */
4643         @Deprecated
4644         public static final String CHANNELMASK = "android.media.audiorecord.channelmask";
4645 
4646         /**
4647          * Use for testing only. Do not expose.
4648          * The current sample rate.
4649          * The value is an {@code int}.
4650          * @hide
4651          */
4652         @TestApi
4653         public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";
4654 
4655         /**
4656          * Use for testing only. Do not expose.
4657          * The native channel mask.
4658          * The value is a {@code long}.
4659          * @hide
4660          */
4661         @TestApi
4662         public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";
4663 
4664         /**
4665          * Use for testing only. Do not expose.
4666          * The output audio data encoding.
4667          * The value is a {@code String}.
4668          * @hide
4669          */
4670         @TestApi
4671         public static final String ENCODING = MM_PREFIX + "encoding";
4672 
4673         /**
4674          * Use for testing only. Do not expose.
4675          * The port id of this track port in audioserver.
4676          * The value is an {@code int}.
4677          * @hide
4678          */
4679         @TestApi
4680         public static final String PORT_ID = MM_PREFIX + "portId";
4681 
4682         /**
4683          * Use for testing only. Do not expose.
4684          * The buffer frameCount.
4685          * The value is an {@code int}.
4686          * @hide
4687          */
4688         @TestApi
4689         public static final String FRAME_COUNT = MM_PREFIX + "frameCount";
4690 
4691         /**
4692          * Use for testing only. Do not expose.
4693          * The actual track attributes used.
4694          * The value is a {@code String}.
4695          * @hide
4696          */
4697         @TestApi
4698         public static final String ATTRIBUTES = MM_PREFIX + "attributes";
4699     }
4700 }
4701